diff --git a/.github/workflows/semgrep.yml b/.github/workflows/semgrep.yml new file mode 100644 index 00000000..4090692f --- /dev/null +++ b/.github/workflows/semgrep.yml @@ -0,0 +1,24 @@ +on: + pull_request: {} + workflow_dispatch: {} + push: + branches: + - main + - master + schedule: + - cron: '0 0 * * *' +name: Semgrep config +jobs: + semgrep: + name: semgrep/ci + runs-on: ubuntu-latest + env: + SEMGREP_APP_TOKEN: ${{ secrets.SEMGREP_APP_TOKEN }} + SEMGREP_URL: https://cloudflare.semgrep.dev + SEMGREP_APP_URL: https://cloudflare.semgrep.dev + SEMGREP_VERSION_CHECK_URL: https://cloudflare.semgrep.dev/api/check-version + container: + image: semgrep/semgrep + steps: + - uses: actions/checkout@v4 + - run: semgrep ci diff --git a/.gitignore b/.gitignore index c258e58c..2af7a1ed 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,4 @@ cscope.* ssh_server_tests/.env /.cover built_artifacts/ +component-tests/.venv diff --git a/.teamcity/install-cloudflare-go.sh b/.teamcity/install-cloudflare-go.sh index ae24c038..9677ed7e 100755 --- a/.teamcity/install-cloudflare-go.sh +++ b/.teamcity/install-cloudflare-go.sh @@ -3,6 +3,6 @@ cd /tmp git clone -q https://github.com/cloudflare/go cd go/src -# https://github.com/cloudflare/go/tree/ec0a014545f180b0c74dfd687698657a9e86e310 is version go1.22.2-devel-cf -git checkout -q ec0a014545f180b0c74dfd687698657a9e86e310 -./make.bash \ No newline at end of file +# https://github.com/cloudflare/go/tree/f4334cdc0c3f22a3bfdd7e66f387e3ffc65a5c38 is version go1.22.5-devel-cf +git checkout -q f4334cdc0c3f22a3bfdd7e66f387e3ffc65a5c38 +./make.bash diff --git a/.teamcity/windows/component-test.ps1 b/.teamcity/windows/component-test.ps1 index fe70738e..548fac6b 100644 --- a/.teamcity/windows/component-test.ps1 +++ b/.teamcity/windows/component-test.ps1 @@ -37,7 +37,7 @@ if ($LASTEXITCODE -ne 0) { throw "Failed unit tests" } Write-Output "Running component tests" -python -m pip --disable-pip-version-check install --upgrade -r 
component-tests/requirements.txt +python -m pip --disable-pip-version-check install --upgrade -r component-tests/requirements.txt --use-pep517 python component-tests/setup.py --type create python -m pytest component-tests -o log_cli=true --log-cli-level=INFO if ($LASTEXITCODE -ne 0) { diff --git a/.teamcity/windows/install-cloudflare-go.ps1 b/.teamcity/windows/install-cloudflare-go.ps1 index eedc7c15..6ff957b9 100644 --- a/.teamcity/windows/install-cloudflare-go.ps1 +++ b/.teamcity/windows/install-cloudflare-go.ps1 @@ -9,8 +9,8 @@ Set-Location "$Env:Temp" git clone -q https://github.com/cloudflare/go Write-Output "Building go..." cd go/src -# https://github.com/cloudflare/go/tree/ec0a014545f180b0c74dfd687698657a9e86e310 is version go1.22.2-devel-cf -git checkout -q ec0a014545f180b0c74dfd687698657a9e86e310 +# https://github.com/cloudflare/go/tree/f4334cdc0c3f22a3bfdd7e66f387e3ffc65a5c38 is version go1.22.5-devel-cf +git checkout -q f4334cdc0c3f22a3bfdd7e66f387e3ffc65a5c38 & ./make.bat -Write-Output "Installed" \ No newline at end of file +Write-Output "Installed" diff --git a/.teamcity/windows/install-go-msi.ps1 b/.teamcity/windows/install-go-msi.ps1 index cb5602c1..7756c1c4 100644 --- a/.teamcity/windows/install-go-msi.ps1 +++ b/.teamcity/windows/install-go-msi.ps1 @@ -1,6 +1,6 @@ $ErrorActionPreference = "Stop" $ProgressPreference = "SilentlyContinue" -$GoMsiVersion = "go1.22.2.windows-amd64.msi" +$GoMsiVersion = "go1.22.5.windows-amd64.msi" Write-Output "Downloading go installer..." @@ -17,4 +17,4 @@ Install-Package "$Env:Temp\$GoMsiVersion" -Force # Go installer updates global $PATH go env -Write-Output "Installed" \ No newline at end of file +Write-Output "Installed" diff --git a/CHANGES.md b/CHANGES.md index ba3cac48..2389511c 100644 --- a/CHANGES.md +++ b/CHANGES.md @@ -1,3 +1,15 @@ +## 2024.12.2 +### New Features +- This release introduces the ability to collect troubleshooting information from one instance of cloudflared running on the local machine. 
The command can be executed as `cloudflared tunnel diag`. + +## 2024.12.1 +### Notices +- The use of the `--metrics` is still honoured meaning that if this flag is set the metrics server will try to bind it, however, this version includes a change that makes the metrics server bind to a port with a semi-deterministic approach. If the metrics flag is not present the server will bind to the first available port of the range 20241 to 20245. In case of all ports being unavailable then the fallback is to bind to a random port. + +## 2024.10.0 +### Bug Fixes +- We fixed a bug related to `--grace-period`. Tunnels that use QUIC as transport weren't abiding by this waiting period before forcefully closing the connections to the edge. From now on, both QUIC and HTTP2 tunnels will wait for either the grace period to end (defaults to 30 seconds) or until the last in-flight request is handled. Users that wish to maintain the previous behavior should set `--grace-period` to 0 if `--protocol` is set to `quic`. This will force `cloudflared` to shutdown as soon as either SIGTERM or SIGINT is received. + ## 2024.2.1 ### Notices - Starting from this version, tunnel diagnostics will be enabled by default. This will allow the engineering team to remotely get diagnostics from cloudflared during debug activities. Users still have the capability to opt-out of this feature by defining `--management-diagnostics=false` (or env `TUNNEL_MANAGEMENT_DIAGNOSTICS`). 
diff --git a/Dockerfile b/Dockerfile index 639dc5ca..39307965 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,11 +1,13 @@ # use a builder image for building cloudflare ARG TARGET_GOOS ARG TARGET_GOARCH -FROM golang:1.22.2 as builder +FROM golang:1.22.5 as builder ENV GO111MODULE=on \ CGO_ENABLED=0 \ TARGET_GOOS=${TARGET_GOOS} \ - TARGET_GOARCH=${TARGET_GOARCH} + TARGET_GOARCH=${TARGET_GOARCH} \ + CONTAINER_BUILD=1 + WORKDIR /go/src/github.com/cloudflare/cloudflared/ diff --git a/Dockerfile.amd64 b/Dockerfile.amd64 index f17969cb..d1cdbcbf 100644 --- a/Dockerfile.amd64 +++ b/Dockerfile.amd64 @@ -1,5 +1,5 @@ # use a builder image for building cloudflare -FROM golang:1.22.2 as builder +FROM golang:1.22.5 as builder ENV GO111MODULE=on \ CGO_ENABLED=0 diff --git a/Dockerfile.arm64 b/Dockerfile.arm64 index 67f4935d..0190bf63 100644 --- a/Dockerfile.arm64 +++ b/Dockerfile.arm64 @@ -1,5 +1,5 @@ # use a builder image for building cloudflare -FROM golang:1.22.2 as builder +FROM golang:1.22.5 as builder ENV GO111MODULE=on \ CGO_ENABLED=0 diff --git a/Makefile b/Makefile index 46fee2a9..c572954a 100644 --- a/Makefile +++ b/Makefile @@ -30,6 +30,10 @@ ifdef PACKAGE_MANAGER VERSION_FLAGS := $(VERSION_FLAGS) -X "github.com/cloudflare/cloudflared/cmd/cloudflared/updater.BuiltForPackageManager=$(PACKAGE_MANAGER)" endif +ifdef CONTAINER_BUILD + VERSION_FLAGS := $(VERSION_FLAGS) -X "github.com/cloudflare/cloudflared/metrics.Runtime=virtual" +endif + LINK_FLAGS := ifeq ($(FIPS), true) LINK_FLAGS := -linkmode=external -extldflags=-static $(LINK_FLAGS) @@ -165,9 +169,17 @@ cover: # Generate the HTML report that can be viewed from the browser in CI. 
$Q go tool cover -html ".cover/c.out" -o .cover/all.html -.PHONY: test-ssh-server -test-ssh-server: - docker-compose -f ssh_server_tests/docker-compose.yml up +.PHONY: fuzz +fuzz: + @go test -fuzz=FuzzIPDecoder -fuzztime=600s ./packet + @go test -fuzz=FuzzICMPDecoder -fuzztime=600s ./packet + @go test -fuzz=FuzzSessionWrite -fuzztime=600s ./quic/v3 + @go test -fuzz=FuzzSessionServe -fuzztime=600s ./quic/v3 + @go test -fuzz=FuzzRegistrationDatagram -fuzztime=600s ./quic/v3 + @go test -fuzz=FuzzPayloadDatagram -fuzztime=600s ./quic/v3 + @go test -fuzz=FuzzRegistrationResponseDatagram -fuzztime=600s ./quic/v3 + @go test -fuzz=FuzzNewIdentity -fuzztime=600s ./tracing + @go test -fuzz=FuzzNewAccessValidator -fuzztime=600s ./validation .PHONY: install-go install-go: diff --git a/README.md b/README.md index 7a3d3bbf..e0db8dc0 100644 --- a/README.md +++ b/README.md @@ -49,7 +49,7 @@ Once installed, you can authenticate `cloudflared` into your Cloudflare account ## TryCloudflare -Want to test Cloudflare Tunnel before adding a website to Cloudflare? You can do so with TryCloudflare using the documentation [available here](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/run-tunnel/trycloudflare). +Want to test Cloudflare Tunnel before adding a website to Cloudflare? You can do so with TryCloudflare using the documentation [available here](https://developers.cloudflare.com/cloudflare-one/connections/connect-networks/do-more-with-tunnels/trycloudflare/). ## Deprecated versions diff --git a/RELEASE_NOTES b/RELEASE_NOTES index 77838d9b..dd036a8d 100644 --- a/RELEASE_NOTES +++ b/RELEASE_NOTES @@ -1,3 +1,85 @@ +2024.12.1 +- 2024-12-10 TUN-8795: update createrepo to createrepo_c to fix the release_pkgs.py script + +2024.12.0 +- 2024-12-09 TUN-8640: Add ICMP support for datagram V3 +- 2024-12-09 TUN-8789: make python package installation consistent +- 2024-12-06 TUN-8781: Add Trixie, drop Buster. 
Default to Bookworm +- 2024-12-05 TUN-8775: Make sure the session Close can only be called once +- 2024-12-04 TUN-8725: implement diagnostic procedure +- 2024-12-04 TUN-8767: include raw output from network collector in diagnostic zipfile +- 2024-12-04 TUN-8770: add cli configuration and tunnel configuration to diagnostic zipfile +- 2024-12-04 TUN-8768: add job report to diagnostic zipfile +- 2024-12-03 TUN-8726: implement compression routine to be used in diagnostic procedure +- 2024-12-03 TUN-8732: implement port selection algorithm +- 2024-12-03 TUN-8762: fix argument order when invoking tracert and modify network info output parsing. +- 2024-12-03 TUN-8769: fix k8s log collector arguments +- 2024-12-03 TUN-8727: extend client to include function to get cli configuration and tunnel configuration +- 2024-11-29 TUN-8729: implement network collection for diagnostic procedure +- 2024-11-29 TUN-8727: implement metrics, runtime, system, and tunnelstate in diagnostic http client +- 2024-11-27 TUN-8733: add log collection for docker +- 2024-11-27 TUN-8734: add log collection for kubernetes +- 2024-11-27 TUN-8640: Refactor ICMPRouter to support new ICMPResponders +- 2024-11-26 TUN-8735: add managed/local log collection +- 2024-11-25 TUN-8728: implement diag/tunnel endpoint +- 2024-11-25 TUN-8730: implement diag/configuration +- 2024-11-22 TUN-8737: update metrics server port selection +- 2024-11-22 TUN-8731: Implement diag/system endpoint +- 2024-11-21 TUN-8748: Migrated datagram V3 flows to use migrated context + +2024.11.1 +- 2024-11-18 Add cloudflared tunnel ready command +- 2024-11-14 Make metrics a requirement for tunnel ready command +- 2024-11-12 TUN-8701: Simplify flow registration logs for datagram v3 +- 2024-11-11 add: new go-fuzz targets +- 2024-11-07 TUN-8701: Add metrics and adjust logs for datagram v3 +- 2024-11-06 TUN-8709: Add session migration for datagram v3 +- 2024-11-04 Fixed 404 in README.md to TryCloudflare +- 2024-09-24 Update semgrep.yml + 
+2024.11.0 +- 2024-11-05 VULN-66059: remove ssh server tests +- 2024-11-04 TUN-8700: Add datagram v3 muxer +- 2024-11-04 TUN-8646: Allow experimental feature support for datagram v3 +- 2024-11-04 TUN-8641: Expose methods to simplify V3 Datagram parsing on the edge +- 2024-10-31 TUN-8708: Bump python min version to 3.10 +- 2024-10-31 TUN-8667: Add datagram v3 session manager +- 2024-10-25 TUN-8692: remove dashes from session id +- 2024-10-24 TUN-8694: Rework release script +- 2024-10-24 TUN-8661: Refactor connection methods to support future different datagram muxing methods +- 2024-07-22 TUN-8553: Bump go to 1.22.5 and go-boring 1.22.5-1 + +2024.10.1 +- 2024-10-23 TUN-8694: Fix github release script +- 2024-10-21 Revert "TUN-8592: Use metadata from the edge to determine if request body is empty for QUIC transport" +- 2024-10-18 TUN-8688: Correct UDP bind for IPv6 edge connectivity on macOS +- 2024-10-17 TUN-8685: Bump coredns dependency +- 2024-10-16 TUN-8638: Add datagram v3 serializers and deserializers +- 2024-10-15 chore: Remove h2mux code +- 2024-10-11 TUN-8631: Abort release on version mismatch + +2024.10.0 +- 2024-10-01 TUN-8646: Add datagram v3 support feature flag +- 2024-09-30 TUN-8621: Fix cloudflared version in change notes to account for release date +- 2024-09-19 Adding semgrep yaml file +- 2024-09-12 TUN-8632: Delay checking auto-update by the provided frequency +- 2024-09-11 TUN-8630: Check checksum of downloaded binary to compare to current for auto-updating +- 2024-09-09 TUN-8629: Cloudflared update on Windows requires running it twice to update +- 2024-09-06 PPIP-2310: Update quick tunnel disclaimer +- 2024-08-30 TUN-8621: Prevent QUIC connection from closing before grace period after unregistering +- 2024-08-09 TUN-8592: Use metadata from the edge to determine if request body is empty for QUIC transport +- 2024-06-26 TUN-8484: Print response when QuickTunnel can't be unmarshalled + +2024.9.1 +- 2024-09-10 Revert Release 2024.9.0 + +2024.9.0 +- 
2024-09-10 TUN-8621: Fix cloudflared version in change notes. +- 2024-09-06 PPIP-2310: Update quick tunnel disclaimer +- 2024-08-30 TUN-8621: Prevent QUIC connection from closing before grace period after unregistering +- 2024-08-09 TUN-8592: Use metadata from the edge to determine if request body is empty for QUIC transport +- 2024-06-26 TUN-8484: Print response when QuickTunnel can't be unmarshalled + 2024.8.3 - 2024-08-15 TUN-8591 login command without extra text - 2024-03-25 remove code that will not be executed diff --git a/cfsetup.yaml b/cfsetup.yaml index 62e8de5d..05ffd20f 100644 --- a/cfsetup.yaml +++ b/cfsetup.yaml @@ -1,8 +1,9 @@ -pinned_go: &pinned_go go-boring=1.22.2-1 +pinned_go: &pinned_go go-boring=1.22.5-1 build_dir: &build_dir /cfsetup_build -default-flavor: bullseye -buster: &buster +default-flavor: bookworm + +bullseye: &bullseye build-linux: build_dir: *build_dir builddeps: &build_deps @@ -31,8 +32,8 @@ buster: &buster builddeps: *build_deps pre-cache: *build_pre_cache post-cache: - - make cover - # except FIPS and macos + - make cover + # except FIPS and macos build-linux-release: build_dir: *build_dir builddeps: &build_deps_release @@ -46,19 +47,17 @@ buster: &buster - python3-pip - python3-setuptools - wget - pre-cache: &build_release_pre_cache - - pip3 install pynacl==1.4.0 - - pip3 install pygithub==1.55 - - pip3 install boto3==1.22.9 - - pip3 install python-gnupg==0.4.9 + - python3-venv post-cache: + - python3 -m venv env + - . 
/cfsetup_build/env/bin/activate + - pip install pynacl==1.4.0 pygithub==1.55 boto3==1.22.9 python-gnupg==0.4.9 # build all packages (except macos and FIPS) and move them to /cfsetup/built_artifacts - ./build-packages.sh # handle FIPS separately so that we built with gofips compiler build-linux-fips-release: build_dir: *build_dir builddeps: *build_deps_release - pre-cache: *build_release_pre_cache post-cache: # same logic as above, but for FIPS packages only - ./build-packages-fips.sh @@ -110,7 +109,7 @@ buster: &buster - export GOOS=linux - export GOARCH=arm64 - export NIGHTLY=true - #- export FIPS=true # TUN-7595 + # - export FIPS=true # TUN-7595 - export ORIGINAL_NAME=true - make cloudflared-deb build-deb-arm64: @@ -133,12 +132,14 @@ buster: &buster # libmsi and libgcab are libraries the wixl binary depends on. - libmsi-dev - libgcab-dev + - python3-venv pre-cache: - wget https://github.com/sudarshan-reddy/msitools/releases/download/v0.101b/wixl -P /usr/local/bin - chmod a+x /usr/local/bin/wixl - - pip3 install pynacl==1.4.0 - - pip3 install pygithub==1.55 post-cache: + - python3 -m venv env + - . env/bin/activate + - pip install pynacl==1.4.0 pygithub==1.55 - .teamcity/package-windows.sh test: build_dir: *build_dir @@ -172,18 +173,22 @@ buster: &buster build_dir: *build_dir builddeps: &build_deps_component_test - *pinned_go - - python3.7 + - python3 - python3-pip - python3-setuptools - # procps installs the ps command which is needed in test_sysv_service because the init script - # uses ps pid to determine if the agent is running + # procps installs the ps command which is needed in test_sysv_service + # because the init script uses ps pid to determine if the agent is + # running - procps + - python3-venv pre-cache-copy-paths: - component-tests/requirements.txt - pre-cache: &component_test_pre_cache - - sudo pip3 install --upgrade -r component-tests/requirements.txt post-cache: &component_test_post_cache - # Creates and routes a Named Tunnel for this build. 
Also constructs config file from env vars. + - python3 -m venv env + - . env/bin/activate + - pip install --upgrade -r component-tests/requirements.txt + # Creates and routes a Named Tunnel for this build. Also constructs + # config file from env vars. - python3 component-tests/setup.py --type create - pytest component-tests -o log_cli=true --log-cli-level=INFO # The Named Tunnel is deleted and its route unprovisioned here. @@ -193,7 +198,6 @@ buster: &buster builddeps: *build_deps_component_test pre-cache-copy-paths: - component-tests/requirements.txt - pre-cache: *component_test_pre_cache post-cache: *component_test_post_cache github-release-dryrun: build_dir: *build_dir @@ -204,10 +208,11 @@ buster: &buster - libffi-dev - python3-setuptools - python3-pip - pre-cache: - - pip3 install pynacl==1.4.0 - - pip3 install pygithub==1.55 + - python3-venv post-cache: + - python3 -m venv env + - . env/bin/activate + - pip install pynacl==1.4.0 pygithub==1.55 - make github-release-dryrun github-release: build_dir: *build_dir @@ -218,10 +223,11 @@ buster: &buster - libffi-dev - python3-setuptools - python3-pip - pre-cache: - - pip3 install pynacl==1.4.0 - - pip3 install pygithub==1.55 + - python3-venv post-cache: + - python3 -m venv env + - . env/bin/activate + - pip install pynacl==1.4.0 pygithub==1.55 - make github-release r2-linux-release: build_dir: *build_dir @@ -237,14 +243,13 @@ buster: &buster - python3-setuptools - python3-pip - reprepro - - createrepo - pre-cache: - - pip3 install pynacl==1.4.0 - - pip3 install pygithub==1.55 - - pip3 install boto3==1.22.9 - - pip3 install python-gnupg==0.4.9 + - createrepo-c + - python3-venv post-cache: + - python3 -m venv env + - . 
env/bin/activate + - pip install pynacl==1.4.0 pygithub==1.55 boto3==1.22.9 python-gnupg==0.4.9 - make r2-linux-release -bullseye: *buster -bookworm: *buster +bookworm: *bullseye +trixie: *bullseye diff --git a/cmd/cloudflared/cliutil/build_info.go b/cmd/cloudflared/cliutil/build_info.go index fff4febf..78ef775a 100644 --- a/cmd/cloudflared/cliutil/build_info.go +++ b/cmd/cloudflared/cliutil/build_info.go @@ -1,7 +1,10 @@ package cliutil import ( + "crypto/sha256" "fmt" + "io" + "os" "runtime" "github.com/rs/zerolog" @@ -13,6 +16,7 @@ type BuildInfo struct { GoArch string `json:"go_arch"` BuildType string `json:"build_type"` CloudflaredVersion string `json:"cloudflared_version"` + Checksum string `json:"checksum"` } func GetBuildInfo(buildType, version string) *BuildInfo { @@ -22,11 +26,12 @@ func GetBuildInfo(buildType, version string) *BuildInfo { GoArch: runtime.GOARCH, BuildType: buildType, CloudflaredVersion: version, + Checksum: currentBinaryChecksum(), } } func (bi *BuildInfo) Log(log *zerolog.Logger) { - log.Info().Msgf("Version %s", bi.CloudflaredVersion) + log.Info().Msgf("Version %s (Checksum %s)", bi.CloudflaredVersion, bi.Checksum) if bi.BuildType != "" { log.Info().Msgf("Built%s", bi.GetBuildTypeMsg()) } @@ -51,3 +56,28 @@ func (bi *BuildInfo) GetBuildTypeMsg() string { func (bi *BuildInfo) UserAgent() string { return fmt.Sprintf("cloudflared/%s", bi.CloudflaredVersion) } + +// FileChecksum opens a file and returns the SHA256 checksum. 
+func FileChecksum(filePath string) (string, error) { + f, err := os.Open(filePath) + if err != nil { + return "", err + } + defer f.Close() + + h := sha256.New() + if _, err := io.Copy(h, f); err != nil { + return "", err + } + + return fmt.Sprintf("%x", h.Sum(nil)), nil +} + +func currentBinaryChecksum() string { + currentPath, err := os.Executable() + if err != nil { + return "" + } + sum, _ := FileChecksum(currentPath) + return sum +} diff --git a/cmd/cloudflared/main.go b/cmd/cloudflared/main.go index 61d2c41d..b0b93cf8 100644 --- a/cmd/cloudflared/main.go +++ b/cmd/cloudflared/main.go @@ -91,7 +91,7 @@ func main() { tunnel.Init(bInfo, graceShutdownC) // we need this to support the tunnel sub command... access.Init(graceShutdownC, Version) - updater.Init(Version) + updater.Init(bInfo) tracing.Init(Version) token.Init(Version) tail.Init(bInfo) diff --git a/cmd/cloudflared/tunnel/cmd.go b/cmd/cloudflared/tunnel/cmd.go index 92f20bd7..aa678231 100644 --- a/cmd/cloudflared/tunnel/cmd.go +++ b/cmd/cloudflared/tunnel/cmd.go @@ -6,6 +6,7 @@ import ( "fmt" "net/url" "os" + "path/filepath" "runtime/trace" "strings" "sync" @@ -28,6 +29,7 @@ import ( "github.com/cloudflare/cloudflared/config" "github.com/cloudflare/cloudflared/connection" "github.com/cloudflare/cloudflared/credentials" + "github.com/cloudflare/cloudflared/diagnostic" "github.com/cloudflare/cloudflared/edgediscovery" "github.com/cloudflare/cloudflared/features" "github.com/cloudflare/cloudflared/ingress" @@ -39,6 +41,7 @@ import ( "github.com/cloudflare/cloudflared/supervisor" "github.com/cloudflare/cloudflared/tlsconfig" "github.com/cloudflare/cloudflared/tunneldns" + "github.com/cloudflare/cloudflared/tunnelstate" "github.com/cloudflare/cloudflared/validation" ) @@ -125,6 +128,94 @@ var ( "most likely you already have a conflicting record there. 
You can also rerun this command with --%s to overwrite "+ "any existing DNS records for this hostname.", overwriteDNSFlag) deprecatedClassicTunnelErr = fmt.Errorf("Classic tunnels have been deprecated, please use Named Tunnels. (https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/tunnel-guide/)") + // TODO: TUN-8756 the list below denotes the flags that do not possess any kind of sensitive information + // however this approach is not maintainble in the long-term. + nonSecretFlagsList = []string{ + "config", + "autoupdate-freq", + "no-autoupdate", + "metrics", + "pidfile", + "url", + "hello-world", + "socks5", + "proxy-connect-timeout", + "proxy-tls-timeout", + "proxy-tcp-keepalive", + "proxy-no-happy-eyeballs", + "proxy-keepalive-connections", + "proxy-keepalive-timeout", + "proxy-connection-timeout", + "proxy-expect-continue-timeout", + "http-host-header", + "origin-server-name", + "unix-socket", + "origin-ca-pool", + "no-tls-verify", + "no-chunked-encoding", + "http2-origin", + "management-hostname", + "service-op-ip", + "local-ssh-port", + "ssh-idle-timeout", + "ssh-max-timeout", + "bucket-name", + "region-name", + "s3-url-host", + "host-key-path", + "ssh-server", + "bastion", + "proxy-address", + "proxy-port", + "loglevel", + "transport-loglevel", + "logfile", + "log-directory", + "trace-output", + "proxy-dns", + "proxy-dns-port", + "proxy-dns-address", + "proxy-dns-upstream", + "proxy-dns-max-upstream-conns", + "proxy-dns-bootstrap", + "is-autoupdated", + "edge", + "region", + "edge-ip-version", + "edge-bind-address", + "cacert", + "hostname", + "id", + "lb-pool", + "api-url", + "metrics-update-freq", + "tag", + "heartbeat-interval", + "heartbeat-count", + "max-edge-addr-retries", + "retries", + "ha-connections", + "rpc-timeout", + "write-stream-timeout", + "quic-disable-pmtu-discovery", + "quic-connection-level-flow-control-limit", + "quic-stream-level-flow-control-limit", + "label", + "grace-period", + 
"compression-quality", + "use-reconnect-token", + "dial-edge-timeout", + "stdin-control", + "name", + "ui", + "quick-service", + "max-fetch-size", + "post-quantum", + "management-diagnostics", + "protocol", + "overwrite-dns", + "help", + } ) func Flags() []cli.Flag { @@ -139,11 +230,13 @@ func Commands() []*cli.Command { buildVirtualNetworkSubcommand(false), buildRunCommand(), buildListCommand(), + buildReadyCommand(), buildInfoCommand(), buildIngressSubcommand(), buildDeleteCommand(), buildCleanupCommand(), buildTokenCommand(), + buildDiagCommand(), // for compatibility, allow following as tunnel subcommands proxydns.Command(true), cliutil.RemovedCommand("db-connect"), @@ -419,7 +512,7 @@ func StartServer( // Disable ICMP packet routing for quick tunnels if quickTunnelURL != "" { - tunnelConfig.PacketConfig = nil + tunnelConfig.ICMPRouterServer = nil } internalRules := []ingress.Rule{} @@ -447,19 +540,42 @@ func StartServer( return err } - metricsListener, err := listeners.Listen("tcp", c.String("metrics")) + metricsListener, err := metrics.CreateMetricsListener(&listeners, c.String("metrics")) if err != nil { log.Err(err).Msg("Error opening metrics server listener") return errors.Wrap(err, "Error opening metrics server listener") } + defer metricsListener.Close() wg.Add(1) + go func() { defer wg.Done() - readinessServer := metrics.NewReadyServer(log, clientID) - observer.RegisterSink(readinessServer) + tracker := tunnelstate.NewConnTracker(log) + observer.RegisterSink(tracker) + + ipv4, ipv6, err := determineICMPSources(c, log) + sources := make([]string, 0) + if err == nil { + sources = append(sources, ipv4.String()) + sources = append(sources, ipv6.String()) + } + + readinessServer := metrics.NewReadyServer(clientID, tracker) + cliFlags := nonSecretCliFlags(log, c, nonSecretFlagsList) + diagnosticHandler := diagnostic.NewDiagnosticHandler( + log, + 0, + diagnostic.NewSystemCollectorImpl(buildInfo.CloudflaredVersion), + 
tunnelConfig.NamedTunnel.Credentials.TunnelID, + clientID, + tracker, + cliFlags, + sources, + ) metricsConfig := metrics.Config{ ReadyServer: readinessServer, + DiagnosticHandler: diagnosticHandler, QuickTunnelHostname: quickTunnelURL, Orchestrator: orchestrator, } @@ -856,9 +972,15 @@ func configureCloudflaredFlags(shouldHide bool) []cli.Flag { Hidden: shouldHide, }), altsrc.NewStringFlag(&cli.StringFlag{ - Name: "metrics", - Value: "localhost:", - Usage: "Listen address for metrics reporting.", + Name: "metrics", + Value: metrics.GetMetricsDefaultAddress(metrics.Runtime), + Usage: fmt.Sprintf( + `Listen address for metrics reporting. If no address is passed cloudflared will try to bind to %v. +If all are unavailable, a random port will be used. Note that when running cloudflared from an virtual +environment the default address binds to all interfaces, hence, it is important to isolate the host +and virtualized host network stacks from each other`, + metrics.GetMetricsKnownAddresses(metrics.Runtime), + ), EnvVars: []string{"TUNNEL_METRICS"}, Hidden: shouldHide, }), @@ -1189,3 +1311,46 @@ reconnect [delay] } } } + +func nonSecretCliFlags(log *zerolog.Logger, cli *cli.Context, flagInclusionList []string) map[string]string { + flagsNames := cli.FlagNames() + flags := make(map[string]string, len(flagsNames)) + + for _, flag := range flagsNames { + value := cli.String(flag) + + if value == "" { + continue + } + + isIncluded := isFlagIncluded(flagInclusionList, flag) + if !isIncluded { + continue + } + + switch flag { + case logger.LogDirectoryFlag, logger.LogFileFlag: + { + absolute, err := filepath.Abs(value) + if err != nil { + log.Error().Err(err).Msgf("could not convert %s path to absolute", flag) + } else { + flags[flag] = absolute + } + } + default: + flags[flag] = value + } + } + return flags +} + +func isFlagIncluded(flagInclusionList []string, flag string) bool { + for _, include := range flagInclusionList { + if include == flag { + return true + } + } + + 
return false +} diff --git a/cmd/cloudflared/tunnel/configuration.go b/cmd/cloudflared/tunnel/configuration.go index 727f4f90..e04a1c76 100644 --- a/cmd/cloudflared/tunnel/configuration.go +++ b/cmd/cloudflared/tunnel/configuration.go @@ -252,11 +252,11 @@ func prepareTunnelConfig( QUICConnectionLevelFlowControlLimit: c.Uint64(quicConnLevelFlowControlLimit), QUICStreamLevelFlowControlLimit: c.Uint64(quicStreamLevelFlowControlLimit), } - packetConfig, err := newPacketConfig(c, log) + icmpRouter, err := newICMPRouter(c, log) if err != nil { log.Warn().Err(err).Msg("ICMP proxy feature is disabled") } else { - tunnelConfig.PacketConfig = packetConfig + tunnelConfig.ICMPRouterServer = icmpRouter } orchestratorConfig := &orchestration.Config{ Ingress: &ingressRules, @@ -351,33 +351,39 @@ func adjustIPVersionByBindAddress(ipVersion allregions.ConfigIPVersion, ip net.I } } -func newPacketConfig(c *cli.Context, logger *zerolog.Logger) (*ingress.GlobalRouterConfig, error) { +func newICMPRouter(c *cli.Context, logger *zerolog.Logger) (ingress.ICMPRouterServer, error) { + ipv4Src, ipv6Src, err := determineICMPSources(c, logger) + if err != nil { + return nil, err + } + + icmpRouter, err := ingress.NewICMPRouter(ipv4Src, ipv6Src, logger, icmpFunnelTimeout) + if err != nil { + return nil, err + } + return icmpRouter, nil +} + +func determineICMPSources(c *cli.Context, logger *zerolog.Logger) (netip.Addr, netip.Addr, error) { ipv4Src, err := determineICMPv4Src(c.String("icmpv4-src"), logger) if err != nil { - return nil, errors.Wrap(err, "failed to determine IPv4 source address for ICMP proxy") + return netip.Addr{}, netip.Addr{}, errors.Wrap(err, "failed to determine IPv4 source address for ICMP proxy") } + logger.Info().Msgf("ICMP proxy will use %s as source for IPv4", ipv4Src) ipv6Src, zone, err := determineICMPv6Src(c.String("icmpv6-src"), logger, ipv4Src) if err != nil { - return nil, errors.Wrap(err, "failed to determine IPv6 source address for ICMP proxy") + return 
netip.Addr{}, netip.Addr{}, errors.Wrap(err, "failed to determine IPv6 source address for ICMP proxy") } + if zone != "" { logger.Info().Msgf("ICMP proxy will use %s in zone %s as source for IPv6", ipv6Src, zone) } else { logger.Info().Msgf("ICMP proxy will use %s as source for IPv6", ipv6Src) } - icmpRouter, err := ingress.NewICMPRouter(ipv4Src, ipv6Src, zone, logger, icmpFunnelTimeout) - if err != nil { - return nil, err - } - return &ingress.GlobalRouterConfig{ - ICMPRouter: icmpRouter, - IPv4Src: ipv4Src, - IPv6Src: ipv6Src, - Zone: zone, - }, nil + return ipv4Src, ipv6Src, nil } func determineICMPv4Src(userDefinedSrc string, logger *zerolog.Logger) (netip.Addr, error) { @@ -407,13 +413,12 @@ type interfaceIP struct { func determineICMPv6Src(userDefinedSrc string, logger *zerolog.Logger, ipv4Src netip.Addr) (addr netip.Addr, zone string, err error) { if userDefinedSrc != "" { - userDefinedIP, zone, _ := strings.Cut(userDefinedSrc, "%") - addr, err := netip.ParseAddr(userDefinedIP) + addr, err := netip.ParseAddr(userDefinedSrc) if err != nil { return netip.Addr{}, "", err } if addr.Is6() { - return addr, zone, nil + return addr, addr.Zone(), nil } return netip.Addr{}, "", fmt.Errorf("expect IPv6, but %s is IPv4", userDefinedSrc) } diff --git a/cmd/cloudflared/tunnel/quick_tunnel.go b/cmd/cloudflared/tunnel/quick_tunnel.go index 64013e58..ee438450 100644 --- a/cmd/cloudflared/tunnel/quick_tunnel.go +++ b/cmd/cloudflared/tunnel/quick_tunnel.go @@ -3,6 +3,7 @@ package tunnel import ( "encoding/json" "fmt" + "io" "net/http" "strings" "time" @@ -15,10 +16,7 @@ import ( const httpTimeout = 15 * time.Second -const disclaimer = "Thank you for trying Cloudflare Tunnel. Doing so, without a Cloudflare account, is a quick way to" + - " experiment and try it out. However, be aware that these account-less Tunnels have no uptime guarantee. 
If you " + - "intend to use Tunnels in production you should use a pre-created named tunnel by following: " + - "https://developers.cloudflare.com/cloudflare-one/connections/connect-apps" +const disclaimer = "Thank you for trying Cloudflare Tunnel. Doing so, without a Cloudflare account, is a quick way to experiment and try it out. However, be aware that these account-less Tunnels have no uptime guarantee, are subject to the Cloudflare Online Services Terms of Use (https://www.cloudflare.com/website-terms/), and Cloudflare reserves the right to investigate your use of Tunnels for violations of such terms. If you intend to use Tunnels in production you should use a pre-created named tunnel by following: https://developers.cloudflare.com/cloudflare-one/connections/connect-apps" // RunQuickTunnel requests a tunnel from the specified service. // We use this to power quick tunnels on trycloudflare.com, but the @@ -47,8 +45,17 @@ func RunQuickTunnel(sc *subcommandContext) error { } defer resp.Body.Close() + // This will read the entire response into memory so we can print it in case of error + rsp_body, err := io.ReadAll(resp.Body) + if err != nil { + return errors.Wrap(err, "failed to read quick-tunnel response") + } + var data QuickTunnelResponse - if err := json.NewDecoder(resp.Body).Decode(&data); err != nil { + if err := json.Unmarshal(rsp_body, &data); err != nil { + rsp_string := string(rsp_body) + fields := map[string]interface{}{"status_code": resp.Status} + sc.log.Err(err).Fields(fields).Msgf("Error unmarshaling QuickTunnel response: %s", rsp_string) return errors.Wrap(err, "failed to unmarshal quick Tunnel") } diff --git a/cmd/cloudflared/tunnel/subcommands.go b/cmd/cloudflared/tunnel/subcommands.go index 0c49e3e7..8bc5ec3a 100644 --- a/cmd/cloudflared/tunnel/subcommands.go +++ b/cmd/cloudflared/tunnel/subcommands.go @@ -5,6 +5,8 @@ import ( "encoding/base64" "encoding/json" "fmt" + "io" + "net/http" "os" "path/filepath" "regexp" @@ -26,17 +28,27 @@ import ( 
"github.com/cloudflare/cloudflared/cmd/cloudflared/updater" "github.com/cloudflare/cloudflared/config" "github.com/cloudflare/cloudflared/connection" + "github.com/cloudflare/cloudflared/diagnostic" + "github.com/cloudflare/cloudflared/metrics" ) const ( - allSortByOptions = "name, id, createdAt, deletedAt, numConnections" - connsSortByOptions = "id, startedAt, numConnections, version" - CredFileFlagAlias = "cred-file" - CredFileFlag = "credentials-file" - CredContentsFlag = "credentials-contents" - TunnelTokenFlag = "token" - TunnelTokenFileFlag = "token-file" - overwriteDNSFlagName = "overwrite-dns" + allSortByOptions = "name, id, createdAt, deletedAt, numConnections" + connsSortByOptions = "id, startedAt, numConnections, version" + CredFileFlagAlias = "cred-file" + CredFileFlag = "credentials-file" + CredContentsFlag = "credentials-contents" + TunnelTokenFlag = "token" + TunnelTokenFileFlag = "token-file" + overwriteDNSFlagName = "overwrite-dns" + noDiagLogsFlagName = "no-diag-logs" + noDiagMetricsFlagName = "no-diag-metrics" + noDiagSystemFlagName = "no-diag-system" + noDiagRuntimeFlagName = "no-diag-runtime" + noDiagNetworkFlagName = "no-diag-network" + diagContainerIDFlagName = "diag-container-id" + diagPodFlagName = "diag-pod-id" + metricsFlagName = "metrics" LogFieldTunnelID = "tunnelID" ) @@ -183,6 +195,46 @@ var ( Usage: "Source address and the interface name to send/receive ICMPv6 messages. If not provided cloudflared will dial a local address to determine the source IP or fallback to ::.", EnvVars: []string{"TUNNEL_ICMPV6_SRC"}, } + metricsFlag = &cli.StringFlag{ + Name: metricsFlagName, + Usage: "The metrics server address i.e.: 127.0.0.1:12345. 
If your instance is running in a Docker/Kubernetes environment you need to setup port forwarding for your application.", + Value: "", + } + diagContainerFlag = &cli.StringFlag{ + Name: diagContainerIDFlagName, + Usage: "Container ID or Name to collect logs from", + Value: "", + } + diagPodFlag = &cli.StringFlag{ + Name: diagPodFlagName, + Usage: "Kubernetes POD to collect logs from", + Value: "", + } + noDiagLogsFlag = &cli.BoolFlag{ + Name: noDiagLogsFlagName, + Usage: "Log collection will not be performed", + Value: false, + } + noDiagMetricsFlag = &cli.BoolFlag{ + Name: noDiagMetricsFlagName, + Usage: "Metric collection will not be performed", + Value: false, + } + noDiagSystemFlag = &cli.BoolFlag{ + Name: noDiagSystemFlagName, + Usage: "System information collection will not be performed", + Value: false, + } + noDiagRuntimeFlag = &cli.BoolFlag{ + Name: noDiagRuntimeFlagName, + Usage: "Runtime information collection will not be performed", + Value: false, + } + noDiagNetworkFlag = &cli.BoolFlag{ + Name: noDiagNetworkFlagName, + Usage: "Network diagnostics won't be performed", + Value: false, + } ) func buildCreateCommand() *cli.Command { @@ -379,7 +431,6 @@ func formatAndPrintTunnelList(tunnels []*cfapi.Tunnel, showRecentlyDisconnected } func fmtConnections(connections []cfapi.Connection, showRecentlyDisconnected bool) string { - // Count connections per colo numConnsPerColo := make(map[string]uint, len(connections)) for _, connection := range connections { @@ -403,6 +454,39 @@ func fmtConnections(connections []cfapi.Connection, showRecentlyDisconnected boo return strings.Join(output, ", ") } +func buildReadyCommand() *cli.Command { + return &cli.Command{ + Name: "ready", + Action: cliutil.ConfiguredAction(readyCommand), + Usage: "Call /ready endpoint and return proper exit code", + UsageText: "cloudflared tunnel [tunnel command options] ready [subcommand options]", + Description: "cloudflared tunnel ready will return proper exit code based on the /ready 
endpoint", + Flags: []cli.Flag{}, + CustomHelpTemplate: commandHelpTemplate(), + } +} + +func readyCommand(c *cli.Context) error { + metricsOpts := c.String("metrics") + if !c.IsSet("metrics") { + return fmt.Errorf("--metrics has to be provided") + } + + requestURL := fmt.Sprintf("http://%s/ready", metricsOpts) + res, err := http.Get(requestURL) + if err != nil { + return err + } + if res.StatusCode != 200 { + body, err := io.ReadAll(res.Body) + if err != nil { + return err + } + return fmt.Errorf("http://%s/ready endpoint returned status code %d\n%s", metricsOpts, res.StatusCode, body) + } + return nil +} + func buildInfoCommand() *cli.Command { return &cli.Command{ Name: "info", @@ -882,8 +966,10 @@ func lbRouteFromArg(c *cli.Context) (cfapi.HostnameRoute, error) { return cfapi.NewLBRoute(lbName, lbPool), nil } -var nameRegex = regexp.MustCompile("^[_a-zA-Z0-9][-_.a-zA-Z0-9]*$") -var hostNameRegex = regexp.MustCompile("^[*_a-zA-Z0-9][-_.a-zA-Z0-9]*$") +var ( + nameRegex = regexp.MustCompile("^[_a-zA-Z0-9][-_.a-zA-Z0-9]*$") + hostNameRegex = regexp.MustCompile("^[*_a-zA-Z0-9][-_.a-zA-Z0-9]*$") +) func validateName(s string, allowWildcardSubdomain bool) bool { if allowWildcardSubdomain { @@ -971,3 +1057,78 @@ SUBCOMMAND OPTIONS: ` return fmt.Sprintf(template, parentFlagsHelp) } + +func buildDiagCommand() *cli.Command { + return &cli.Command{ + Name: "diag", + Action: cliutil.ConfiguredAction(diagCommand), + Usage: "Creates a diagnostic report from a local cloudflared instance", + UsageText: "cloudflared tunnel [tunnel command options] diag [subcommand options]", + Description: "cloudflared tunnel diag will create a diagnostic report of a local cloudflared instance. The diagnostic procedure collects: logs, metrics, system information, traceroute to Cloudflare Edge, and runtime information. 
Since there may be multiple instances of cloudflared running the --metrics option may be provided to target a specific instance.", + Flags: []cli.Flag{ + metricsFlag, + diagContainerFlag, + diagPodFlag, + noDiagLogsFlag, + noDiagMetricsFlag, + noDiagSystemFlag, + noDiagRuntimeFlag, + noDiagNetworkFlag, + }, + CustomHelpTemplate: commandHelpTemplate(), + } +} + +func diagCommand(ctx *cli.Context) error { + sctx, err := newSubcommandContext(ctx) + if err != nil { + return err + } + log := sctx.log + options := diagnostic.Options{ + KnownAddresses: metrics.GetMetricsKnownAddresses(metrics.Runtime), + Address: sctx.c.String(metricsFlagName), + ContainerID: sctx.c.String(diagContainerIDFlagName), + PodID: sctx.c.String(diagPodFlagName), + Toggles: diagnostic.Toggles{ + NoDiagLogs: sctx.c.Bool(noDiagLogsFlagName), + NoDiagMetrics: sctx.c.Bool(noDiagMetricsFlagName), + NoDiagSystem: sctx.c.Bool(noDiagSystemFlagName), + NoDiagRuntime: sctx.c.Bool(noDiagRuntimeFlagName), + NoDiagNetwork: sctx.c.Bool(noDiagNetworkFlagName), + }, + } + + if options.Address == "" { + log.Info().Msg("If your instance is running in a Docker/Kubernetes environment you need to setup port forwarding for your application.") + } + + states, err := diagnostic.RunDiagnostic(log, options) + + if errors.Is(err, diagnostic.ErrMetricsServerNotFound) { + log.Warn().Msg("No instances found") + return nil + } + if errors.Is(err, diagnostic.ErrMultipleMetricsServerFound) { + if states != nil { + log.Info().Msgf("Found multiple instances running:") + for _, state := range states { + log.Info().Msgf("Instance: tunnel-id=%s connector-id=%s metrics-address=%s", state.TunnelID, state.ConnectorID, state.URL.String()) + } + log.Info().Msgf("To select one instance use the option --metrics") + } + return nil + } + + if errors.Is(err, diagnostic.ErrLogConfigurationIsInvalid) { + log.Info().Msg("Couldn't extract logs from the instance. 
If the instance is running in a containerized environment use the option --diag-container-id or --diag-pod-id. If there is no logging configuration use --no-diag-logs.") + } + + if err != nil { + log.Warn().Msg("Diagnostic completed with one or more errors") + } else { + log.Info().Msg("Diagnostic completed") + } + + return nil +} diff --git a/cmd/cloudflared/updater/update.go b/cmd/cloudflared/updater/update.go index 07b382f5..1d3cbc2e 100644 --- a/cmd/cloudflared/updater/update.go +++ b/cmd/cloudflared/updater/update.go @@ -14,6 +14,7 @@ import ( "github.com/urfave/cli/v2" "golang.org/x/term" + "github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil" "github.com/cloudflare/cloudflared/config" "github.com/cloudflare/cloudflared/logger" ) @@ -31,7 +32,7 @@ const ( ) var ( - version string + buildInfo *cliutil.BuildInfo BuiltForPackageManager = "" ) @@ -81,8 +82,8 @@ func (uo *UpdateOutcome) noUpdate() bool { return uo.Error == nil && uo.Updated == false } -func Init(v string) { - version = v +func Init(info *cliutil.BuildInfo) { + buildInfo = info } func CheckForUpdate(options updateOptions) (CheckResult, error) { @@ -100,11 +101,12 @@ func CheckForUpdate(options updateOptions) (CheckResult, error) { cfdPath = encodeWindowsPath(cfdPath) } - s := NewWorkersService(version, url, cfdPath, Options{IsBeta: options.isBeta, + s := NewWorkersService(buildInfo.CloudflaredVersion, url, cfdPath, Options{IsBeta: options.isBeta, IsForced: options.isForced, RequestedVersion: options.intendedVersion}) return s.Check() } + func encodeWindowsPath(path string) string { // We do this because Windows allows spaces in directories such as // Program Files but does not allow these directories to be spaced in batch files. @@ -196,10 +198,9 @@ func loggedUpdate(log *zerolog.Logger, options updateOptions) UpdateOutcome { // AutoUpdater periodically checks for new version of cloudflared. 
type AutoUpdater struct { - configurable *configurable - listeners *gracenet.Net - updateConfigChan chan *configurable - log *zerolog.Logger + configurable *configurable + listeners *gracenet.Net + log *zerolog.Logger } // AutoUpdaterConfigurable is the attributes of AutoUpdater that can be reconfigured during runtime @@ -210,10 +211,9 @@ type configurable struct { func NewAutoUpdater(updateDisabled bool, freq time.Duration, listeners *gracenet.Net, log *zerolog.Logger) *AutoUpdater { return &AutoUpdater{ - configurable: createUpdateConfig(updateDisabled, freq, log), - listeners: listeners, - updateConfigChan: make(chan *configurable), - log: log, + configurable: createUpdateConfig(updateDisabled, freq, log), + listeners: listeners, + log: log, } } @@ -232,12 +232,20 @@ func createUpdateConfig(updateDisabled bool, freq time.Duration, log *zerolog.Lo } } +// Run will periodically check for cloudflared updates, download them, and then restart the current cloudflared process +// to use the new version. It delays the first update check by the configured frequency so as not to attempt a +// download immediately and restart after starting (in the case that there is an upgrade available). 
func (a *AutoUpdater) Run(ctx context.Context) error { ticker := time.NewTicker(a.configurable.freq) for { + select { + case <-ctx.Done(): + return ctx.Err() + case <-ticker.C: + } updateOutcome := loggedUpdate(a.log, updateOptions{updateDisabled: !a.configurable.enabled}) if updateOutcome.Updated { - Init(updateOutcome.Version) + buildInfo.CloudflaredVersion = updateOutcome.Version if IsSysV() { // SysV doesn't have a mechanism to keep service alive, we have to restart the process a.log.Info().Msg("Restarting service managed by SysV...") @@ -254,25 +262,9 @@ func (a *AutoUpdater) Run(ctx context.Context) error { } else if updateOutcome.UserMessage != "" { a.log.Warn().Msg(updateOutcome.UserMessage) } - - select { - case <-ctx.Done(): - return ctx.Err() - case newConfigurable := <-a.updateConfigChan: - ticker.Stop() - a.configurable = newConfigurable - ticker = time.NewTicker(a.configurable.freq) - // Check if there is new version of cloudflared after receiving new AutoUpdaterConfigurable - case <-ticker.C: - } } } -// Update is the method to pass new AutoUpdaterConfigurable to a running AutoUpdater. 
It is safe to be called concurrently -func (a *AutoUpdater) Update(updateDisabled bool, newFreq time.Duration) { - a.updateConfigChan <- createUpdateConfig(updateDisabled, newFreq, a.log) -} - func isAutoupdateEnabled(log *zerolog.Logger, updateDisabled bool, updateFreq time.Duration) bool { if !supportAutoUpdate(log) { return false diff --git a/cmd/cloudflared/updater/update_test.go b/cmd/cloudflared/updater/update_test.go index f977b96e..3159f7ab 100644 --- a/cmd/cloudflared/updater/update_test.go +++ b/cmd/cloudflared/updater/update_test.go @@ -9,8 +9,14 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/urfave/cli/v2" + + "github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil" ) +func init() { + Init(cliutil.GetBuildInfo("TEST", "TEST")) +} + func TestDisabledAutoUpdater(t *testing.T) { listeners := &gracenet.Net{} log := zerolog.Nop() diff --git a/cmd/cloudflared/updater/workers_service.go b/cmd/cloudflared/updater/workers_service.go index 4b52571c..b5883f1f 100644 --- a/cmd/cloudflared/updater/workers_service.go +++ b/cmd/cloudflared/updater/workers_service.go @@ -3,6 +3,7 @@ package updater import ( "encoding/json" "errors" + "fmt" "net/http" "runtime" ) @@ -79,6 +80,10 @@ func (s *WorkersService) Check() (CheckResult, error) { } defer resp.Body.Close() + if resp.StatusCode != 200 { + return nil, fmt.Errorf("unable to check for update: %d", resp.StatusCode) + } + var v VersionResponse if err := json.NewDecoder(resp.Body).Decode(&v); err != nil { return nil, err diff --git a/cmd/cloudflared/updater/workers_update.go b/cmd/cloudflared/updater/workers_update.go index b2d451a9..b7a86ff1 100644 --- a/cmd/cloudflared/updater/workers_update.go +++ b/cmd/cloudflared/updater/workers_update.go @@ -3,7 +3,6 @@ package updater import ( "archive/tar" "compress/gzip" - "crypto/sha256" "errors" "fmt" "io" @@ -16,6 +15,10 @@ import ( "strings" "text/template" "time" + + "github.com/getsentry/sentry-go" + + 
"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil" ) const ( @@ -27,9 +30,9 @@ const ( // start the service // exit with code 0 if we've reached this point indicating success. windowsUpdateCommandTemplate = `sc stop cloudflared >nul 2>&1 +del "{{.OldPath}}" rename "{{.TargetPath}}" {{.OldName}} rename "{{.NewPath}}" {{.BinaryName}} -del "{{.OldPath}}" sc start cloudflared >nul 2>&1 exit /b 0` batchFileName = "cfd_update.bat" @@ -86,8 +89,25 @@ func (v *WorkersVersion) Apply() error { return err } - // check that the file is what is expected - if err := isValidChecksum(v.checksum, newFilePath); err != nil { + downloadSum, err := cliutil.FileChecksum(newFilePath) + if err != nil { + return err + } + + // Check that the file downloaded matches what is expected. + if v.checksum != downloadSum { + return errors.New("checksum validation failed") + } + + // Check if the currently running version has the same checksum + if downloadSum == buildInfo.Checksum { + // Currently running binary matches the downloaded binary so we have no reason to update. This is + // typically unexpected, as such we emit a sentry event. + localHub := sentry.CurrentHub().Clone() + err := errors.New("checksum validation matches currently running process") + localHub.CaptureException(err) + // Make sure to cleanup the new downloaded file since we aren't upgrading versions. 
+ os.Remove(newFilePath) return err } @@ -189,27 +209,6 @@ func isCompressedFile(urlstring string) bool { return strings.HasSuffix(u.Path, ".tgz") } -// checks if the checksum in the json response matches the checksum of the file download -func isValidChecksum(checksum, filePath string) error { - f, err := os.Open(filePath) - if err != nil { - return err - } - defer f.Close() - - h := sha256.New() - if _, err := io.Copy(h, f); err != nil { - return err - } - - hash := fmt.Sprintf("%x", h.Sum(nil)) - - if checksum != hash { - return errors.New("checksum validation failed") - } - return nil -} - // writeBatchFile writes a batch file out to disk // see the dicussion on why it has to be done this way func writeBatchFile(targetPath string, newPath string, oldPath string) error { diff --git a/component-tests/README.md b/component-tests/README.md index 537fb47e..6eac7782 100644 --- a/component-tests/README.md +++ b/component-tests/README.md @@ -1,9 +1,9 @@ # Requirements -1. Python 3.7 or later with packages in the given `requirements.txt` - - E.g. with conda: - - `conda create -n component-tests python=3.7` - - `conda activate component-tests` - - `pip3 install -r requirements.txt` +1. Python 3.10 or later with packages in the given `requirements.txt` + - E.g. with venv: + - `python3 -m venv ./.venv` + - `source ./.venv/bin/activate` + - `python3 -m pip install -r requirements.txt` 2. 
Create a config yaml file, for example: ``` diff --git a/component-tests/test_termination.py b/component-tests/test_termination.py index 26f4fea4..128d95d6 100644 --- a/component-tests/test_termination.py +++ b/component-tests/test_termination.py @@ -45,9 +45,10 @@ class TestTermination: with connected: connected.wait(self.timeout) # Send signal after the SSE connection is established - self.terminate_by_signal(cloudflared, signal) - self.wait_eyeball_thread( - in_flight_req, self.grace_period + self.timeout) + with self.within_grace_period(): + self.terminate_by_signal(cloudflared, signal) + self.wait_eyeball_thread( + in_flight_req, self.grace_period + self.timeout) # test cloudflared terminates before grace period expires when all eyeball # connections are drained @@ -66,7 +67,7 @@ class TestTermination: with connected: connected.wait(self.timeout) - with self.within_grace_period(): + with self.within_grace_period(has_connection=False): # Send signal after the SSE connection is established self.terminate_by_signal(cloudflared, signal) self.wait_eyeball_thread(in_flight_req, self.grace_period) @@ -78,7 +79,7 @@ class TestTermination: with start_cloudflared( tmp_path, config, cfd_pre_args=["tunnel", "--ha-connections", "1"], new_process=True, capture_output=False) as cloudflared: wait_tunnel_ready(tunnel_url=config.get_url()) - with self.within_grace_period(): + with self.within_grace_period(has_connection=False): self.terminate_by_signal(cloudflared, signal) def terminate_by_signal(self, cloudflared, sig): @@ -92,13 +93,21 @@ class TestTermination: # Using this context asserts logic within the context is executed within grace period @contextmanager - def within_grace_period(self): + def within_grace_period(self, has_connection=True): try: start = time.time() yield finally: + + # If the request takes longer than the grace period then we need to wait at most the grace period. 
+ # If the request fell within the grace period cloudflared can close earlier, but to ensure that it doesn't + # close immediately we add a minimum boundary. If cloudflared shut down in less than 1s it's likely that + # it shut down as soon as it received SIGINT. The only way cloudflared can close immediately is if it has no + # in-flight requests minimum = 1 if has_connection else 0 duration = time.time() - start - assert duration < self.grace_period + # Here we truncate to ensure that we don't fail on minute differences like 10.1 instead of 10 + assert minimum <= int(duration) <= self.grace_period def stream_request(self, config, connected, early_terminate): expected_terminate_message = "502 Bad Gateway" diff --git a/connection/connection.go b/connection/connection.go index 50464e4a..b7376e38 100644 --- a/connection/connection.go +++ b/connection/connection.go @@ -36,6 +36,13 @@ var ( flushableContentTypes = []string{sseContentType, grpcContentType} ) +// TunnelConnection represents the connection to the edge. +// The Serve method is provided to allow clients to handle any errors from the connection encountered during +// processing of the connection. Cancelling of the context provided to Serve will close the connection. 
+type TunnelConnection interface { + Serve(ctx context.Context) error +} + type Orchestrator interface { UpdateConfig(version int32, config []byte) *pogs.UpdateConfigurationResponse GetConfigJSON() ([]byte, error) diff --git a/connection/control.go b/connection/control.go index e0bfeae9..2e5f1e35 100644 --- a/connection/control.go +++ b/connection/control.go @@ -6,6 +6,8 @@ import ( "net" "time" + "github.com/pkg/errors" + "github.com/cloudflare/cloudflared/management" "github.com/cloudflare/cloudflared/tunnelrpc" tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" @@ -100,7 +102,7 @@ func (c *controlStream) ServeControlStream( c.observer.metrics.regSuccess.WithLabelValues("registerConnection").Inc() c.observer.logConnected(registrationDetails.UUID, c.connIndex, registrationDetails.Location, c.edgeAddress, c.protocol) - c.observer.sendConnectedEvent(c.connIndex, c.protocol, registrationDetails.Location) + c.observer.sendConnectedEvent(c.connIndex, c.protocol, registrationDetails.Location, c.edgeAddress) c.connectedFuse.Connected() // if conn index is 0 and tunnel is not remotely managed, then send local ingress rules configuration @@ -116,27 +118,32 @@ func (c *controlStream) ServeControlStream( } } - c.waitForUnregister(ctx, registrationClient) - return nil + return c.waitForUnregister(ctx, registrationClient) } -func (c *controlStream) waitForUnregister(ctx context.Context, registrationClient tunnelrpc.RegistrationClient) { +func (c *controlStream) waitForUnregister(ctx context.Context, registrationClient tunnelrpc.RegistrationClient) error { // wait for connection termination or start of graceful shutdown defer registrationClient.Close() + var shutdownError error select { case <-ctx.Done(): + shutdownError = ctx.Err() break case <-c.gracefulShutdownC: c.stoppedGracefully = true } c.observer.sendUnregisteringEvent(c.connIndex) - registrationClient.GracefulShutdown(ctx, c.gracePeriod) + err := registrationClient.GracefulShutdown(ctx, c.gracePeriod) + if 
err != nil { + return errors.Wrap(err, "Error shutting down control stream") + } c.observer.log.Info(). Int(management.EventTypeKey, int(management.Cloudflared)). Uint8(LogFieldConnIndex, c.connIndex). IPAddr(LogFieldIPAddress, c.edgeAddress). Msg("Unregistered tunnel connection") + return shutdownError } func (c *controlStream) IsStopped() bool { diff --git a/connection/errors.go b/connection/errors.go index 17cf58a3..1bb34d6d 100644 --- a/connection/errors.go +++ b/connection/errors.go @@ -2,7 +2,6 @@ package connection import ( "github.com/cloudflare/cloudflared/edgediscovery" - "github.com/cloudflare/cloudflared/h2mux" tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" ) @@ -71,8 +70,6 @@ func isHandshakeErrRecoverable(err error, connIndex uint8, observer *Observer) b switch err.(type) { case edgediscovery.DialError: log.Error().Msg("Connection unable to dial edge") - case h2mux.MuxerHandshakeError: - log.Error().Msg("Connection handshake with edge server failed") default: log.Error().Msg("Connection failed") return false diff --git a/connection/event.go b/connection/event.go index d10b92fc..f4078fe0 100644 --- a/connection/event.go +++ b/connection/event.go @@ -1,12 +1,15 @@ package connection +import "net" + // Event is something that happened to a connection, e.g. disconnection or registration. type Event struct { - Index uint8 - EventType Status - Location string - Protocol Protocol - URL string + Index uint8 + EventType Status + Location string + Protocol Protocol + URL string + EdgeAddress net.IP } // Status is the status of a connection. 
diff --git a/connection/h2mux.go b/connection/h2mux.go deleted file mode 100644 index 4de983bc..00000000 --- a/connection/h2mux.go +++ /dev/null @@ -1,32 +0,0 @@ -package connection - -import ( - "time" - - "github.com/rs/zerolog" - - "github.com/cloudflare/cloudflared/h2mux" -) - -const ( - muxerTimeout = 5 * time.Second -) - -type MuxerConfig struct { - HeartbeatInterval time.Duration - MaxHeartbeats uint64 - CompressionSetting h2mux.CompressionSetting - MetricsUpdateFreq time.Duration -} - -func (mc *MuxerConfig) H2MuxerConfig(h h2mux.MuxedStreamHandler, log *zerolog.Logger) *h2mux.MuxerConfig { - return &h2mux.MuxerConfig{ - Timeout: muxerTimeout, - Handler: h, - IsClient: true, - HeartbeatInterval: mc.HeartbeatInterval, - MaxHeartbeats: mc.MaxHeartbeats, - Log: log, - CompressionQuality: mc.CompressionSetting, - } -} diff --git a/connection/h2mux_header.go b/connection/h2mux_header.go deleted file mode 100644 index 3987f0db..00000000 --- a/connection/h2mux_header.go +++ /dev/null @@ -1,128 +0,0 @@ -package connection - -import ( - "fmt" - "net/http" - "net/url" - "strconv" - "strings" - - "github.com/pkg/errors" - - "github.com/cloudflare/cloudflared/h2mux" -) - -// H2RequestHeadersToH1Request converts the HTTP/2 headers coming from origintunneld -// to an HTTP/1 Request object destined for the local origin web service. -// This operation includes conversion of the pseudo-headers into their closest -// HTTP/1 equivalents. 
See https://tools.ietf.org/html/rfc7540#section-8.1.2.3 -func H2RequestHeadersToH1Request(h2 []h2mux.Header, h1 *http.Request) error { - for _, header := range h2 { - name := strings.ToLower(header.Name) - if !IsH2muxControlRequestHeader(name) { - continue - } - - switch name { - case ":method": - h1.Method = header.Value - case ":scheme": - // noop - use the preexisting scheme from h1.URL - case ":authority": - // Otherwise the host header will be based on the origin URL - h1.Host = header.Value - case ":path": - // We don't want to be an "opinionated" proxy, so ideally we would use :path as-is. - // However, this HTTP/1 Request object belongs to the Go standard library, - // whose URL package makes some opinionated decisions about the encoding of - // URL characters: see the docs of https://godoc.org/net/url#URL, - // in particular the EscapedPath method https://godoc.org/net/url#URL.EscapedPath, - // which is always used when computing url.URL.String(), whether we'd like it or not. - // - // Well, not *always*. We could circumvent this by using url.URL.Opaque. But - // that would present unusual difficulties when using an HTTP proxy: url.URL.Opaque - // is treated differently when HTTP_PROXY is set! - // See https://github.com/golang/go/issues/5684#issuecomment-66080888 - // - // This means we are subject to the behavior of net/url's function `shouldEscape` - // (as invoked with mode=encodePath): https://github.com/golang/go/blob/go1.12.7/src/net/url/url.go#L101 - - if header.Value == "*" { - h1.URL.Path = "*" - continue - } - // Due to the behavior of validation.ValidateUrl, h1.URL may - // already have a partial value, with or without a trailing slash. 
- base := h1.URL.String() - base = strings.TrimRight(base, "/") - // But we know :path begins with '/', because we handled '*' above - see RFC7540 - requestURL, err := url.Parse(base + header.Value) - if err != nil { - return errors.Wrap(err, fmt.Sprintf("invalid path '%v'", header.Value)) - } - h1.URL = requestURL - case "content-length": - contentLength, err := strconv.ParseInt(header.Value, 10, 64) - if err != nil { - return fmt.Errorf("unparseable content length") - } - h1.ContentLength = contentLength - case RequestUserHeaders: - // Do not forward the serialized headers to the origin -- deserialize them, and ditch the serialized version - // Find and parse user headers serialized into a single one - userHeaders, err := DeserializeHeaders(header.Value) - if err != nil { - return errors.Wrap(err, "Unable to parse user headers") - } - for _, userHeader := range userHeaders { - h1.Header.Add(userHeader.Name, userHeader.Value) - } - default: - // All other control headers shall just be proxied transparently - h1.Header.Add(header.Name, header.Value) - } - } - - return nil -} - -func H1ResponseToH2ResponseHeaders(status int, h1 http.Header) (h2 []h2mux.Header) { - h2 = []h2mux.Header{ - {Name: ":status", Value: strconv.Itoa(status)}, - } - userHeaders := make(http.Header, len(h1)) - for header, values := range h1 { - h2name := strings.ToLower(header) - if h2name == "content-length" { - // This header has meaning in HTTP/2 and will be used by the edge, - // so it should be sent as an HTTP/2 response header. 
- - // Since these are http2 headers, they're required to be lowercase - h2 = append(h2, h2mux.Header{Name: "content-length", Value: values[0]}) - } else if !IsH2muxControlResponseHeader(h2name) || IsWebsocketClientHeader(h2name) { - // User headers, on the other hand, must all be serialized so that - // HTTP/2 header validation won't be applied to HTTP/1 header values - userHeaders[header] = values - } - } - - // Perform user header serialization and set them in the single header - h2 = append(h2, h2mux.Header{Name: ResponseUserHeaders, Value: SerializeHeaders(userHeaders)}) - return h2 -} - -// IsH2muxControlRequestHeader is called in the direction of eyeball -> origin. -func IsH2muxControlRequestHeader(headerName string) bool { - return headerName == "content-length" || - headerName == "connection" || headerName == "upgrade" || // Websocket request headers - strings.HasPrefix(headerName, ":") || - strings.HasPrefix(headerName, "cf-") -} - -// IsH2muxControlResponseHeader is called in the direction of eyeball <- origin. 
-func IsH2muxControlResponseHeader(headerName string) bool { - return headerName == "content-length" || - strings.HasPrefix(headerName, ":") || - strings.HasPrefix(headerName, "cf-int-") || - strings.HasPrefix(headerName, "cf-cloudflared-") -} diff --git a/connection/h2mux_header_test.go b/connection/h2mux_header_test.go deleted file mode 100644 index a78e02f4..00000000 --- a/connection/h2mux_header_test.go +++ /dev/null @@ -1,642 +0,0 @@ -package connection - -import ( - "fmt" - "math/rand" - "net/http" - "net/url" - "reflect" - "regexp" - "strings" - "testing" - "testing/quick" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/cloudflare/cloudflared/h2mux" -) - -type ByName []h2mux.Header - -func (a ByName) Len() int { return len(a) } -func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a ByName) Less(i, j int) bool { - if a[i].Name == a[j].Name { - return a[i].Value < a[j].Value - } - - return a[i].Name < a[j].Name -} - -func TestH2RequestHeadersToH1Request_RegularHeaders(t *testing.T) { - request, err := http.NewRequest(http.MethodGet, "http://example.com", nil) - assert.NoError(t, err) - - mockHeaders := http.Header{ - "Mock header 1": {"Mock value 1"}, - "Mock header 2": {"Mock value 2"}, - } - - headersConversionErr := H2RequestHeadersToH1Request(createSerializedHeaders(RequestUserHeaders, mockHeaders), request) - - assert.True(t, reflect.DeepEqual(mockHeaders, request.Header)) - assert.NoError(t, headersConversionErr) -} - -func createSerializedHeaders(headersField string, headers http.Header) []h2mux.Header { - return []h2mux.Header{{ - Name: headersField, - Value: SerializeHeaders(headers), - }} -} - -func TestH2RequestHeadersToH1Request_NoHeaders(t *testing.T) { - request, err := http.NewRequest(http.MethodGet, "http://example.com", nil) - assert.NoError(t, err) - - emptyHeaders := make(http.Header) - headersConversionErr := H2RequestHeadersToH1Request( - []h2mux.Header{{ - Name: 
RequestUserHeaders, - Value: SerializeHeaders(emptyHeaders), - }}, - request, - ) - - assert.True(t, reflect.DeepEqual(emptyHeaders, request.Header)) - assert.NoError(t, headersConversionErr) -} - -func TestH2RequestHeadersToH1Request_InvalidHostPath(t *testing.T) { - request, err := http.NewRequest(http.MethodGet, "http://example.com", nil) - assert.NoError(t, err) - - mockRequestHeaders := []h2mux.Header{ - {Name: ":path", Value: "//bad_path/"}, - {Name: RequestUserHeaders, Value: SerializeHeaders(http.Header{"Mock header": {"Mock value"}})}, - } - - headersConversionErr := H2RequestHeadersToH1Request(mockRequestHeaders, request) - - assert.Equal(t, http.Header{ - "Mock header": []string{"Mock value"}, - }, request.Header) - - assert.Equal(t, "http://example.com//bad_path/", request.URL.String()) - - assert.NoError(t, headersConversionErr) -} - -func TestH2RequestHeadersToH1Request_HostPathWithQuery(t *testing.T) { - request, err := http.NewRequest(http.MethodGet, "http://example.com/", nil) - assert.NoError(t, err) - - mockRequestHeaders := []h2mux.Header{ - {Name: ":path", Value: "/?query=mock%20value"}, - {Name: RequestUserHeaders, Value: SerializeHeaders(http.Header{"Mock header": {"Mock value"}})}, - } - - headersConversionErr := H2RequestHeadersToH1Request(mockRequestHeaders, request) - - assert.Equal(t, http.Header{ - "Mock header": []string{"Mock value"}, - }, request.Header) - - assert.Equal(t, "http://example.com/?query=mock%20value", request.URL.String()) - - assert.NoError(t, headersConversionErr) -} - -func TestH2RequestHeadersToH1Request_HostPathWithURLEncoding(t *testing.T) { - request, err := http.NewRequest(http.MethodGet, "http://example.com/", nil) - assert.NoError(t, err) - - mockRequestHeaders := []h2mux.Header{ - {Name: ":path", Value: "/mock%20path"}, - {Name: RequestUserHeaders, Value: SerializeHeaders(http.Header{"Mock header": {"Mock value"}})}, - } - - headersConversionErr := H2RequestHeadersToH1Request(mockRequestHeaders, request) - - 
assert.Equal(t, http.Header{ - "Mock header": []string{"Mock value"}, - }, request.Header) - - assert.Equal(t, "http://example.com/mock%20path", request.URL.String()) - - assert.NoError(t, headersConversionErr) -} - -func TestH2RequestHeadersToH1Request_WeirdURLs(t *testing.T) { - type testCase struct { - path string - want string - } - testCases := []testCase{ - { - path: "", - want: "", - }, - { - path: "/", - want: "/", - }, - { - path: "//", - want: "//", - }, - { - path: "/test", - want: "/test", - }, - { - path: "//test", - want: "//test", - }, - { - // https://github.com/cloudflare/cloudflared/issues/81 - path: "//test/", - want: "//test/", - }, - { - path: "/%2Ftest", - want: "/%2Ftest", - }, - { - path: "//%20test", - want: "//%20test", - }, - { - // https://github.com/cloudflare/cloudflared/issues/124 - path: "/test?get=somthing%20a", - want: "/test?get=somthing%20a", - }, - { - path: "/%20", - want: "/%20", - }, - { - // stdlib's EscapedPath() will always percent-encode ' ' - path: "/ ", - want: "/%20", - }, - { - path: "/ a ", - want: "/%20a%20", - }, - { - path: "/a%20b", - want: "/a%20b", - }, - { - path: "/foo/bar;param?query#frag", - want: "/foo/bar;param?query#frag", - }, - { - // stdlib's EscapedPath() will always percent-encode non-ASCII chars - path: "/a␠b", - want: "/a%E2%90%A0b", - }, - { - path: "/a-umlaut-ä", - want: "/a-umlaut-%C3%A4", - }, - { - path: "/a-umlaut-%C3%A4", - want: "/a-umlaut-%C3%A4", - }, - { - path: "/a-umlaut-%c3%a4", - want: "/a-umlaut-%c3%a4", - }, - { - // here the second '#' is treated as part of the fragment - path: "/a#b#c", - want: "/a#b%23c", - }, - { - path: "/a#b␠c", - want: "/a#b%E2%90%A0c", - }, - { - path: "/a#b%20c", - want: "/a#b%20c", - }, - { - path: "/a#b c", - want: "/a#b%20c", - }, - { - // stdlib's EscapedPath() will always percent-encode '\' - path: "/\\", - want: "/%5C", - }, - { - path: "/a\\", - want: "/a%5C", - }, - { - path: "/a,b.c.", - want: "/a,b.c.", - }, - { - path: "/.", - want: "/.", - }, 
- { - // stdlib's EscapedPath() will always percent-encode '`' - path: "/a`", - want: "/a%60", - }, - { - path: "/a[0]", - want: "/a[0]", - }, - { - path: "/?a[0]=5 &b[]=", - want: "/?a[0]=5 &b[]=", - }, - { - path: "/?a=%22b%20%22", - want: "/?a=%22b%20%22", - }, - } - - for index, testCase := range testCases { - requestURL := "https://example.com" - - request, err := http.NewRequest(http.MethodGet, requestURL, nil) - assert.NoError(t, err) - - mockRequestHeaders := []h2mux.Header{ - {Name: ":path", Value: testCase.path}, - {Name: RequestUserHeaders, Value: SerializeHeaders(http.Header{"Mock header": {"Mock value"}})}, - } - - headersConversionErr := H2RequestHeadersToH1Request(mockRequestHeaders, request) - assert.NoError(t, headersConversionErr) - - assert.Equal(t, - http.Header{ - "Mock header": []string{"Mock value"}, - }, - request.Header) - - assert.Equal(t, - "https://example.com"+testCase.want, - request.URL.String(), - "Failed URL index: %v %#v", index, testCase) - } -} - -func TestH2RequestHeadersToH1Request_QuickCheck(t *testing.T) { - config := &quick.Config{ - Values: func(args []reflect.Value, rand *rand.Rand) { - args[0] = reflect.ValueOf(randomHTTP2Path(t, rand)) - }, - } - - type testOrigin struct { - url string - - expectedScheme string - expectedBasePath string - } - testOrigins := []testOrigin{ - { - url: "http://origin.hostname.example.com:8080", - expectedScheme: "http", - expectedBasePath: "http://origin.hostname.example.com:8080", - }, - { - url: "http://origin.hostname.example.com:8080/", - expectedScheme: "http", - expectedBasePath: "http://origin.hostname.example.com:8080", - }, - { - url: "http://origin.hostname.example.com:8080/api", - expectedScheme: "http", - expectedBasePath: "http://origin.hostname.example.com:8080/api", - }, - { - url: "http://origin.hostname.example.com:8080/api/", - expectedScheme: "http", - expectedBasePath: "http://origin.hostname.example.com:8080/api", - }, - { - url: 
"https://origin.hostname.example.com:8080/api", - expectedScheme: "https", - expectedBasePath: "https://origin.hostname.example.com:8080/api", - }, - } - - // use multiple schemes to demonstrate that the URL is based on the - // origin's scheme, not the :scheme header - for _, testScheme := range []string{"http", "https"} { - for _, testOrigin := range testOrigins { - assertion := func(testPath string) bool { - const expectedMethod = "POST" - const expectedHostname = "request.hostname.example.com" - - h2 := []h2mux.Header{ - {Name: ":method", Value: expectedMethod}, - {Name: ":scheme", Value: testScheme}, - {Name: ":authority", Value: expectedHostname}, - {Name: ":path", Value: testPath}, - {Name: RequestUserHeaders, Value: ""}, - } - h1, err := http.NewRequest("GET", testOrigin.url, nil) - require.NoError(t, err) - - err = H2RequestHeadersToH1Request(h2, h1) - return assert.NoError(t, err) && - assert.Equal(t, expectedMethod, h1.Method) && - assert.Equal(t, expectedHostname, h1.Host) && - assert.Equal(t, testOrigin.expectedScheme, h1.URL.Scheme) && - assert.Equal(t, testOrigin.expectedBasePath+testPath, h1.URL.String()) - } - err := quick.Check(assertion, config) - assert.NoError(t, err) - } - } -} - -func randomASCIIPrintableChar(rand *rand.Rand) int { - // smallest printable ASCII char is 32, largest is 126 - const startPrintable = 32 - const endPrintable = 127 - return startPrintable + rand.Intn(endPrintable-startPrintable) -} - -// randomASCIIText generates an ASCII string, some of whose characters may be -// percent-encoded. Its "logical length" (ignoring percent-encoding) is -// between 1 and `maxLength`. 
-func randomASCIIText(rand *rand.Rand, minLength int, maxLength int) string { - length := minLength + rand.Intn(maxLength) - var result strings.Builder - for i := 0; i < length; i++ { - c := randomASCIIPrintableChar(rand) - - // 1/4 chance of using percent encoding when not necessary - if c == '%' || rand.Intn(4) == 0 { - result.WriteString(fmt.Sprintf("%%%02X", c)) - } else { - result.WriteByte(byte(c)) - } - } - return result.String() -} - -// Calls `randomASCIIText` and ensures the result is a valid URL path, -// i.e. one that can pass unchanged through url.URL.String() -func randomHTTP1Path(t *testing.T, rand *rand.Rand, minLength int, maxLength int) string { - text := randomASCIIText(rand, minLength, maxLength) - re, err := regexp.Compile("[^/;,]*") - require.NoError(t, err) - return "/" + re.ReplaceAllStringFunc(text, url.PathEscape) -} - -// Calls `randomASCIIText` and ensures the result is a valid URL query, -// i.e. one that can pass unchanged through url.URL.String() -func randomHTTP1Query(rand *rand.Rand, minLength int, maxLength int) string { - text := randomASCIIText(rand, minLength, maxLength) - return "?" + strings.ReplaceAll(text, "#", "%23") -} - -// Calls `randomASCIIText` and ensures the result is a valid URL fragment, -// i.e. one that can pass unchanged through url.URL.String() -func randomHTTP1Fragment(t *testing.T, rand *rand.Rand, minLength int, maxLength int) string { - text := randomASCIIText(rand, minLength, maxLength) - u, err := url.Parse("#" + text) - require.NoError(t, err) - return u.String() -} - -// Assemble a random :path pseudoheader that is legal by Go stdlib standards -// (i.e. 
all characters will satisfy "net/url".shouldEscape for their respective locations) -func randomHTTP2Path(t *testing.T, rand *rand.Rand) string { - result := randomHTTP1Path(t, rand, 1, 64) - if rand.Intn(2) == 1 { - result += randomHTTP1Query(rand, 1, 32) - } - if rand.Intn(2) == 1 { - result += randomHTTP1Fragment(t, rand, 1, 16) - } - return result -} - -func stdlibHeaderToH2muxHeader(headers http.Header) (h2muxHeaders []h2mux.Header) { - for name, values := range headers { - for _, value := range values { - h2muxHeaders = append(h2muxHeaders, h2mux.Header{Name: name, Value: value}) - } - } - - return h2muxHeaders -} - -func TestParseRequestHeaders(t *testing.T) { - mockUserHeadersToSerialize := http.Header{ - "Mock-Header-One": {"1", "1.5"}, - "Mock-Header-Two": {"2"}, - "Mock-Header-Three": {"3"}, - } - - mockHeaders := []h2mux.Header{ - {Name: "One", Value: "1"}, // will be dropped - {Name: "Cf-Two", Value: "cf-value-1"}, - {Name: "Cf-Two", Value: "cf-value-2"}, - {Name: RequestUserHeaders, Value: SerializeHeaders(mockUserHeadersToSerialize)}, - } - - expectedHeaders := []h2mux.Header{ - {Name: "Cf-Two", Value: "cf-value-1"}, - {Name: "Cf-Two", Value: "cf-value-2"}, - {Name: "Mock-Header-One", Value: "1"}, - {Name: "Mock-Header-One", Value: "1.5"}, - {Name: "Mock-Header-Two", Value: "2"}, - {Name: "Mock-Header-Three", Value: "3"}, - } - h1 := &http.Request{ - Header: make(http.Header), - } - err := H2RequestHeadersToH1Request(mockHeaders, h1) - assert.NoError(t, err) - assert.ElementsMatch(t, expectedHeaders, stdlibHeaderToH2muxHeader(h1.Header)) -} - -func TestIsH2muxControlRequestHeader(t *testing.T) { - controlRequestHeaders := []string{ - // Anything that begins with cf- - "cf-sample-header", - - // Any http2 pseudoheader - ":sample-pseudo-header", - - // content-length is a special case, it has to be there - // for some requests to work (per the HTTP2 spec) - "content-length", - - // Websocket request headers - "connection", - "upgrade", - } - - for _, 
header := range controlRequestHeaders { - assert.True(t, IsH2muxControlRequestHeader(header)) - } -} - -func TestIsH2muxControlResponseHeader(t *testing.T) { - controlResponseHeaders := []string{ - // Anything that begins with cf-int- or cf-cloudflared- - "cf-int-sample-header", - "cf-cloudflared-sample-header", - - // Any http2 pseudoheader - ":sample-pseudo-header", - - // content-length is a special case, it has to be there - // for some requests to work (per the HTTP2 spec) - "content-length", - } - - for _, header := range controlResponseHeaders { - assert.True(t, IsH2muxControlResponseHeader(header)) - } -} - -func TestIsNotH2muxControlRequestHeader(t *testing.T) { - notControlRequestHeaders := []string{ - "mock-header", - "another-sample-header", - } - - for _, header := range notControlRequestHeaders { - assert.False(t, IsH2muxControlRequestHeader(header)) - } -} - -func TestIsNotH2muxControlResponseHeader(t *testing.T) { - notControlResponseHeaders := []string{ - "mock-header", - "another-sample-header", - "upgrade", - "connection", - "cf-whatever", // On the response path, we only want to filter cf-int- and cf-cloudflared- - } - - for _, header := range notControlResponseHeaders { - assert.False(t, IsH2muxControlResponseHeader(header)) - } -} - -func TestH1ResponseToH2ResponseHeaders(t *testing.T) { - mockHeaders := http.Header{ - "User-header-one": {""}, - "User-header-two": {"1", "2"}, - "cf-header": {"cf-value"}, - "cf-int-header": {"cf-int-value"}, - "cf-cloudflared-header": {"cf-cloudflared-value"}, - "Content-Length": {"123"}, - } - mockResponse := http.Response{ - StatusCode: 200, - Header: mockHeaders, - } - - headers := H1ResponseToH2ResponseHeaders(mockResponse.StatusCode, mockResponse.Header) - - serializedHeadersIndex := -1 - for i, header := range headers { - if header.Name == ResponseUserHeaders { - serializedHeadersIndex = i - break - } - } - assert.NotEqual(t, -1, serializedHeadersIndex) - actualControlHeaders := append( - 
headers[:serializedHeadersIndex], - headers[serializedHeadersIndex+1:]..., - ) - expectedControlHeaders := []h2mux.Header{ - {Name: ":status", Value: "200"}, - {Name: "content-length", Value: "123"}, - } - - assert.ElementsMatch(t, expectedControlHeaders, actualControlHeaders) - - actualUserHeaders, err := DeserializeHeaders(headers[serializedHeadersIndex].Value) - expectedUserHeaders := []h2mux.Header{ - {Name: "User-header-one", Value: ""}, - {Name: "User-header-two", Value: "1"}, - {Name: "User-header-two", Value: "2"}, - {Name: "cf-header", Value: "cf-value"}, - } - assert.NoError(t, err) - assert.ElementsMatch(t, expectedUserHeaders, actualUserHeaders) -} - -// The purpose of this test is to check that our code and the http.Header -// implementation don't throw validation errors about header size -func TestHeaderSize(t *testing.T) { - largeValue := randSeq(5 * 1024 * 1024) // 5Mb - largeHeaders := http.Header{ - "User-header": {largeValue}, - } - mockResponse := http.Response{ - StatusCode: 200, - Header: largeHeaders, - } - - serializedHeaders := H1ResponseToH2ResponseHeaders(mockResponse.StatusCode, mockResponse.Header) - request, err := http.NewRequest(http.MethodGet, "https://example.com/", nil) - assert.NoError(t, err) - for _, header := range serializedHeaders { - request.Header.Set(header.Name, header.Value) - } - - for _, header := range serializedHeaders { - if header.Name != ResponseUserHeaders { - continue - } - - deserializedHeaders, err := DeserializeHeaders(header.Value) - assert.NoError(t, err) - assert.Equal(t, largeValue, deserializedHeaders[0].Value) - } -} - -func randSeq(n int) string { - randomizer := rand.New(rand.NewSource(17)) - var letters = []rune(":;,+/=abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") - b := make([]rune, n) - for i := range b { - b[i] = letters[randomizer.Intn(len(letters))] - } - return string(b) -} - -func BenchmarkH1ResponseToH2ResponseHeaders(b *testing.B) { - ser := 
"eC1mb3J3YXJkZWQtcHJvdG8:aHR0cHM;dXBncmFkZS1pbnNlY3VyZS1yZXF1ZXN0cw:MQ;YWNjZXB0LWxhbmd1YWdl:ZW4tVVMsZW47cT0wLjkscnU7cT0wLjg;YWNjZXB0LWVuY29kaW5n:Z3ppcA;eC1mb3J3YXJkZWQtZm9y:MTczLjI0NS42MC42;dXNlci1hZ2VudA:TW96aWxsYS81LjAgKE1hY2ludG9zaDsgSW50ZWwgTWFjIE9TIFggMTBfMTRfNikgQXBwbGVXZWJLaXQvNTM3LjM2IChLSFRNTCwgbGlrZSBHZWNrbykgQ2hyb21lLzg0LjAuNDE0Ny44OSBTYWZhcmkvNTM3LjM2;c2VjLWZldGNoLW1vZGU:bmF2aWdhdGU;Y2RuLWxvb3A:Y2xvdWRmbGFyZQ;c2VjLWZldGNoLWRlc3Q:ZG9jdW1lbnQ;c2VjLWZldGNoLXVzZXI:PzE;c2VjLWZldGNoLXNpdGU:bm9uZQ;Y29va2ll:X19jZmR1aWQ9ZGNkOWZjOGNjNWMxMzE0NTMyYTFkMjhlZDEyOWRhOTYwMTU2OTk1MTYzNDsgX19jZl9ibT1mYzY2MzMzYzAzZmM0MWFiZTZmOWEyYzI2ZDUwOTA0YzIxYzZhMTQ2LTE1OTU2MjIzNDEtMTgwMC1BZTVzS2pIU2NiWGVFM05mMUhrTlNQMG1tMHBLc2pQWkloVnM1Z2g1SkNHQkFhS1UxVDB2b003alBGN3FjMHVSR2NjZGcrWHdhL1EzbTJhQzdDVU4xZ2M9;YWNjZXB0:dGV4dC9odG1sLGFwcGxpY2F0aW9uL3hodG1sK3htbCxhcHBsaWNhdGlvbi94bWw7cT0wLjksaW1hZ2Uvd2VicCxpbWFnZS9hcG5nLCovKjtxPTAuOCxhcHBsaWNhdGlvbi9zaWduZWQtZXhjaGFuZ2U7dj1iMztxPTAuOQ" - h2, _ := DeserializeHeaders(ser) - h1 := make(http.Header) - for _, header := range h2 { - h1.Add(header.Name, header.Value) - } - h1.Add("Content-Length", "200") - h1.Add("Cf-Something", "Else") - h1.Add("Upgrade", "websocket") - - h1resp := &http.Response{ - StatusCode: 200, - Header: h1, - } - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - _ = H1ResponseToH2ResponseHeaders(h1resp.StatusCode, h1resp.Header) - } -} diff --git a/connection/header.go b/connection/header.go index d1544263..516c5df6 100644 --- a/connection/header.go +++ b/connection/header.go @@ -7,17 +7,15 @@ import ( "strings" "github.com/pkg/errors" - - "github.com/cloudflare/cloudflared/h2mux" ) var ( - // h2mux-style special headers + // internal special headers RequestUserHeaders = "cf-cloudflared-request-headers" ResponseUserHeaders = "cf-cloudflared-response-headers" ResponseMetaHeader = "cf-cloudflared-response-meta" - // h2mux-style special headers + // internal special headers CanonicalResponseUserHeaders = 
http.CanonicalHeaderKey(ResponseUserHeaders) CanonicalResponseMetaHeader = http.CanonicalHeaderKey(ResponseMetaHeader) ) @@ -28,6 +26,13 @@ var ( responseMetaHeaderOrigin = mustInitRespMetaHeader("origin") ) +// HTTPHeader is a custom header struct that expects only ever one value for the header. +// This structure is used to serialize the headers and attach them to the HTTP2 request when proxying. +type HTTPHeader struct { + Name string + Value string +} + type responseMetaHeader struct { Source string `json:"src"` } @@ -104,10 +109,10 @@ func SerializeHeaders(h1Headers http.Header) string { } // Deserialize headers serialized by `SerializeHeader` -func DeserializeHeaders(serializedHeaders string) ([]h2mux.Header, error) { +func DeserializeHeaders(serializedHeaders string) ([]HTTPHeader, error) { const unableToDeserializeErr = "Unable to deserialize headers" - var deserialized []h2mux.Header + var deserialized []HTTPHeader for _, serializedPair := range strings.Split(serializedHeaders, ";") { if len(serializedPair) == 0 { continue @@ -130,7 +135,7 @@ func DeserializeHeaders(serializedHeaders string) ([]h2mux.Header, error) { return nil, errors.Wrap(err, unableToDeserializeErr) } - deserialized = append(deserialized, h2mux.Header{ + deserialized = append(deserialized, HTTPHeader{ Name: string(deserializedName), Value: string(deserializedValue), }) diff --git a/connection/header_test.go b/connection/header_test.go index 88add316..1ca4b31b 100644 --- a/connection/header_test.go +++ b/connection/header_test.go @@ -46,18 +46,40 @@ func TestSerializeHeaders(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 13, len(deserializedHeaders)) - h2muxExpectedHeaders := stdlibHeaderToH2muxHeader(mockHeaders) + expectedHeaders := headerToReqHeader(mockHeaders) sort.Sort(ByName(deserializedHeaders)) - sort.Sort(ByName(h2muxExpectedHeaders)) + sort.Sort(ByName(expectedHeaders)) assert.True( t, - reflect.DeepEqual(h2muxExpectedHeaders, deserializedHeaders), - fmt.Sprintf("got = 
%#v, want = %#v\n", deserializedHeaders, h2muxExpectedHeaders), + reflect.DeepEqual(expectedHeaders, deserializedHeaders), + fmt.Sprintf("got = %#v, want = %#v\n", deserializedHeaders, expectedHeaders), ) } +type ByName []HTTPHeader + +func (a ByName) Len() int { return len(a) } +func (a ByName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a ByName) Less(i, j int) bool { + if a[i].Name == a[j].Name { + return a[i].Value < a[j].Value + } + + return a[i].Name < a[j].Name +} + +func headerToReqHeader(headers http.Header) (reqHeaders []HTTPHeader) { + for name, values := range headers { + for _, value := range values { + reqHeaders = append(reqHeaders, HTTPHeader{Name: name, Value: value}) + } + } + + return reqHeaders +} + func TestSerializeNoHeaders(t *testing.T) { request, err := http.NewRequest(http.MethodGet, "http://example.com", nil) assert.NoError(t, err) diff --git a/connection/http2.go b/connection/http2.go index f5e4d873..aee9d9da 100644 --- a/connection/http2.go +++ b/connection/http2.go @@ -385,8 +385,7 @@ func determineHTTP2Type(r *http.Request) Type { func handleMissingRequestParts(connType Type, r *http.Request) { if connType == TypeHTTP { // http library has no guarantees that we receive a filled URL. If not, then we fill it, as we reuse the request - // for proxying. We use the same values as we used to in h2mux. For proxying they should not matter since we - // control the dialer on every egress proxied. + // for proxying. For proxying they should not matter since we control the dialer on every egress proxied. 
if len(r.URL.Scheme) == 0 { r.URL.Scheme = "http" } diff --git a/connection/http2_test.go b/connection/http2_test.go index a0ec8b45..92665688 100644 --- a/connection/http2_test.go +++ b/connection/http2_test.go @@ -192,8 +192,9 @@ func (mc mockNamedTunnelRPCClient) RegisterConnection( }, nil } -func (mc mockNamedTunnelRPCClient) GracefulShutdown(ctx context.Context, gracePeriod time.Duration) { +func (mc mockNamedTunnelRPCClient) GracefulShutdown(ctx context.Context, gracePeriod time.Duration) error { close(mc.unregistered) + return nil } func (mockNamedTunnelRPCClient) Close() {} diff --git a/connection/metrics.go b/connection/metrics.go index c80bf46a..0801ebbc 100644 --- a/connection/metrics.go +++ b/connection/metrics.go @@ -2,11 +2,8 @@ package connection import ( "sync" - "time" "github.com/prometheus/client_golang/prometheus" - - "github.com/cloudflare/cloudflared/h2mux" ) const ( @@ -16,27 +13,6 @@ const ( configSubsystem = "config" ) -type muxerMetrics struct { - rtt *prometheus.GaugeVec - rttMin *prometheus.GaugeVec - rttMax *prometheus.GaugeVec - receiveWindowAve *prometheus.GaugeVec - sendWindowAve *prometheus.GaugeVec - receiveWindowMin *prometheus.GaugeVec - receiveWindowMax *prometheus.GaugeVec - sendWindowMin *prometheus.GaugeVec - sendWindowMax *prometheus.GaugeVec - inBoundRateCurr *prometheus.GaugeVec - inBoundRateMin *prometheus.GaugeVec - inBoundRateMax *prometheus.GaugeVec - outBoundRateCurr *prometheus.GaugeVec - outBoundRateMin *prometheus.GaugeVec - outBoundRateMax *prometheus.GaugeVec - compBytesBefore *prometheus.GaugeVec - compBytesAfter *prometheus.GaugeVec - compRateAve *prometheus.GaugeVec -} - type localConfigMetrics struct { pushes prometheus.Counter pushesErrors prometheus.Counter @@ -53,7 +29,6 @@ type tunnelMetrics struct { regFail *prometheus.CounterVec rpcFail *prometheus.CounterVec - muxerMetrics *muxerMetrics tunnelsHA tunnelsForHA userHostnamesCounts *prometheus.CounterVec @@ -91,252 +66,6 @@ func newLocalConfigMetrics() 
*localConfigMetrics { } } -func newMuxerMetrics() *muxerMetrics { - rtt := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "rtt", - Help: "Round-trip time in millisecond", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(rtt) - - rttMin := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "rtt_min", - Help: "Shortest round-trip time in millisecond", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(rttMin) - - rttMax := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "rtt_max", - Help: "Longest round-trip time in millisecond", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(rttMax) - - receiveWindowAve := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "receive_window_ave", - Help: "Average receive window size in bytes", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(receiveWindowAve) - - sendWindowAve := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "send_window_ave", - Help: "Average send window size in bytes", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(sendWindowAve) - - receiveWindowMin := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "receive_window_min", - Help: "Smallest receive window size in bytes", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(receiveWindowMin) - - receiveWindowMax := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "receive_window_max", - Help: "Largest receive window size in bytes", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(receiveWindowMax) - - 
sendWindowMin := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "send_window_min", - Help: "Smallest send window size in bytes", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(sendWindowMin) - - sendWindowMax := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "send_window_max", - Help: "Largest send window size in bytes", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(sendWindowMax) - - inBoundRateCurr := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "inbound_bytes_per_sec_curr", - Help: "Current inbounding bytes per second, 0 if there is no incoming connection", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(inBoundRateCurr) - - inBoundRateMin := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "inbound_bytes_per_sec_min", - Help: "Minimum non-zero inbounding bytes per second", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(inBoundRateMin) - - inBoundRateMax := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "inbound_bytes_per_sec_max", - Help: "Maximum inbounding bytes per second", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(inBoundRateMax) - - outBoundRateCurr := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "outbound_bytes_per_sec_curr", - Help: "Current outbounding bytes per second, 0 if there is no outgoing traffic", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(outBoundRateCurr) - - outBoundRateMin := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "outbound_bytes_per_sec_min", - 
Help: "Minimum non-zero outbounding bytes per second", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(outBoundRateMin) - - outBoundRateMax := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "outbound_bytes_per_sec_max", - Help: "Maximum outbounding bytes per second", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(outBoundRateMax) - - compBytesBefore := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "comp_bytes_before", - Help: "Bytes sent via cross-stream compression, pre compression", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(compBytesBefore) - - compBytesAfter := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "comp_bytes_after", - Help: "Bytes sent via cross-stream compression, post compression", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(compBytesAfter) - - compRateAve := prometheus.NewGaugeVec( - prometheus.GaugeOpts{ - Namespace: MetricsNamespace, - Subsystem: muxerSubsystem, - Name: "comp_rate_ave", - Help: "Average outbound cross-stream compression ratio", - }, - []string{"connection_id"}, - ) - prometheus.MustRegister(compRateAve) - - return &muxerMetrics{ - rtt: rtt, - rttMin: rttMin, - rttMax: rttMax, - receiveWindowAve: receiveWindowAve, - sendWindowAve: sendWindowAve, - receiveWindowMin: receiveWindowMin, - receiveWindowMax: receiveWindowMax, - sendWindowMin: sendWindowMin, - sendWindowMax: sendWindowMax, - inBoundRateCurr: inBoundRateCurr, - inBoundRateMin: inBoundRateMin, - inBoundRateMax: inBoundRateMax, - outBoundRateCurr: outBoundRateCurr, - outBoundRateMin: outBoundRateMin, - outBoundRateMax: outBoundRateMax, - compBytesBefore: compBytesBefore, - compBytesAfter: compBytesAfter, - compRateAve: compRateAve, - } -} - -func (m *muxerMetrics) update(connectionID string, 
metrics *h2mux.MuxerMetrics) { - m.rtt.WithLabelValues(connectionID).Set(convertRTTMilliSec(metrics.RTT)) - m.rttMin.WithLabelValues(connectionID).Set(convertRTTMilliSec(metrics.RTTMin)) - m.rttMax.WithLabelValues(connectionID).Set(convertRTTMilliSec(metrics.RTTMax)) - m.receiveWindowAve.WithLabelValues(connectionID).Set(metrics.ReceiveWindowAve) - m.sendWindowAve.WithLabelValues(connectionID).Set(metrics.SendWindowAve) - m.receiveWindowMin.WithLabelValues(connectionID).Set(float64(metrics.ReceiveWindowMin)) - m.receiveWindowMax.WithLabelValues(connectionID).Set(float64(metrics.ReceiveWindowMax)) - m.sendWindowMin.WithLabelValues(connectionID).Set(float64(metrics.SendWindowMin)) - m.sendWindowMax.WithLabelValues(connectionID).Set(float64(metrics.SendWindowMax)) - m.inBoundRateCurr.WithLabelValues(connectionID).Set(float64(metrics.InBoundRateCurr)) - m.inBoundRateMin.WithLabelValues(connectionID).Set(float64(metrics.InBoundRateMin)) - m.inBoundRateMax.WithLabelValues(connectionID).Set(float64(metrics.InBoundRateMax)) - m.outBoundRateCurr.WithLabelValues(connectionID).Set(float64(metrics.OutBoundRateCurr)) - m.outBoundRateMin.WithLabelValues(connectionID).Set(float64(metrics.OutBoundRateMin)) - m.outBoundRateMax.WithLabelValues(connectionID).Set(float64(metrics.OutBoundRateMax)) - m.compBytesBefore.WithLabelValues(connectionID).Set(float64(metrics.CompBytesBefore.Value())) - m.compBytesAfter.WithLabelValues(connectionID).Set(float64(metrics.CompBytesAfter.Value())) - m.compRateAve.WithLabelValues(connectionID).Set(float64(metrics.CompRateAve())) -} - -func convertRTTMilliSec(t time.Duration) float64 { - return float64(t / time.Millisecond) -} - // Metrics that can be collected without asking the edge func initTunnelMetrics() *tunnelMetrics { maxConcurrentRequestsPerTunnel := prometheus.NewGaugeVec( @@ -408,7 +137,6 @@ func initTunnelMetrics() *tunnelMetrics { return &tunnelMetrics{ serverLocations: serverLocations, oldServerLocations: make(map[string]string), - 
muxerMetrics: newMuxerMetrics(), tunnelsHA: newTunnelsForHA(), regSuccess: registerSuccess, regFail: registerFail, @@ -418,10 +146,6 @@ func initTunnelMetrics() *tunnelMetrics { } } -func (t *tunnelMetrics) updateMuxerMetrics(connectionID string, metrics *h2mux.MuxerMetrics) { - t.muxerMetrics.update(connectionID, metrics) -} - func (t *tunnelMetrics) registerServerLocation(connectionID, loc string) { t.locationLock.Lock() defer t.locationLock.Unlock() diff --git a/connection/observer.go b/connection/observer.go index c6cb895e..817e6d2e 100644 --- a/connection/observer.go +++ b/connection/observer.go @@ -47,7 +47,6 @@ func (o *Observer) RegisterSink(sink EventSink) { } func (o *Observer) logConnected(connectionID uuid.UUID, connIndex uint8, location string, address net.IP, protocol Protocol) { - o.sendEvent(Event{Index: connIndex, EventType: Connected, Location: location}) o.log.Info(). Int(management.EventTypeKey, int(management.Cloudflared)). Str(LogFieldConnectionID, connectionID.String()). 
@@ -63,8 +62,8 @@ func (o *Observer) sendRegisteringEvent(connIndex uint8) { o.sendEvent(Event{Index: connIndex, EventType: RegisteringTunnel}) } -func (o *Observer) sendConnectedEvent(connIndex uint8, protocol Protocol, location string) { - o.sendEvent(Event{Index: connIndex, EventType: Connected, Protocol: protocol, Location: location}) +func (o *Observer) sendConnectedEvent(connIndex uint8, protocol Protocol, location string, edgeAddress net.IP) { + o.sendEvent(Event{Index: connIndex, EventType: Connected, Protocol: protocol, Location: location, EdgeAddress: edgeAddress}) } func (o *Observer) SendURL(url string) { diff --git a/connection/protocol.go b/connection/protocol.go index ecc367b4..417c8b72 100644 --- a/connection/protocol.go +++ b/connection/protocol.go @@ -13,7 +13,7 @@ import ( const ( AvailableProtocolFlagMessage = "Available protocols: 'auto' - automatically chooses the best protocol over time (the default; and also the recommended one); 'quic' - based on QUIC, relying on UDP egress to Cloudflare edge; 'http2' - using Go's HTTP2 library, relying on TCP egress to Cloudflare edge" - // edgeH2muxTLSServerName is the server name to establish h2mux connection with edge + // edgeH2muxTLSServerName is the server name to establish h2mux connection with edge (unused, but kept for legacy reference). 
edgeH2muxTLSServerName = "cftunnel.com" // edgeH2TLSServerName is the server name to establish http2 connection with edge edgeH2TLSServerName = "h2.cftunnel.com" diff --git a/connection/quic.go b/connection/quic.go index c5e218f3..3109d77f 100644 --- a/connection/quic.go +++ b/connection/quic.go @@ -1,51 +1,16 @@ package connection import ( - "bufio" "context" "crypto/tls" "fmt" - "io" "net" - "net/http" "net/netip" "runtime" - "strconv" - "strings" "sync" - "sync/atomic" - "time" - "github.com/google/uuid" - "github.com/pkg/errors" "github.com/quic-go/quic-go" "github.com/rs/zerolog" - "go.opentelemetry.io/otel/attribute" - "go.opentelemetry.io/otel/trace" - "golang.org/x/sync/errgroup" - - "github.com/cloudflare/cloudflared/datagramsession" - "github.com/cloudflare/cloudflared/ingress" - "github.com/cloudflare/cloudflared/management" - "github.com/cloudflare/cloudflared/packet" - cfdquic "github.com/cloudflare/cloudflared/quic" - "github.com/cloudflare/cloudflared/tracing" - "github.com/cloudflare/cloudflared/tunnelrpc/pogs" - tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" - rpcquic "github.com/cloudflare/cloudflared/tunnelrpc/quic" -) - -const ( - // HTTPHeaderKey is used to get or set http headers in QUIC ALPN if the underlying proxy connection type is HTTP. - HTTPHeaderKey = "HttpHeader" - // HTTPMethodKey is used to get or set http method in QUIC ALPN if the underlying proxy connection type is HTTP. - HTTPMethodKey = "HttpMethod" - // HTTPHostKey is used to get or set http Method in QUIC ALPN if the underlying proxy connection type is HTTP. - HTTPHostKey = "HttpHost" - - QUICMetadataFlowID = "FlowID" - // emperically this capacity has been working well - demuxChanCapacity = 16 ) var ( @@ -53,46 +18,21 @@ var ( portMapMutex sync.Mutex ) -// QUICConnection represents the type that facilitates Proxying via QUIC streams. 
-type QUICConnection struct { - session quic.Connection - logger *zerolog.Logger - orchestrator Orchestrator - // sessionManager tracks active sessions. It receives datagrams from quic connection via datagramMuxer - sessionManager datagramsession.Manager - // datagramMuxer mux/demux datagrams from quic connection - datagramMuxer *cfdquic.DatagramMuxerV2 - packetRouter *ingress.PacketRouter - controlStreamHandler ControlStreamHandler - connOptions *tunnelpogs.ConnectionOptions - connIndex uint8 - - rpcTimeout time.Duration - streamWriteTimeout time.Duration -} - -// NewQUICConnection returns a new instance of QUICConnection. -func NewQUICConnection( +func DialQuic( ctx context.Context, quicConfig *quic.Config, - edgeAddr net.Addr, + tlsConfig *tls.Config, + edgeAddr netip.AddrPort, localAddr net.IP, connIndex uint8, - tlsConfig *tls.Config, - orchestrator Orchestrator, - connOptions *tunnelpogs.ConnectionOptions, - controlStreamHandler ControlStreamHandler, logger *zerolog.Logger, - packetRouterConfig *ingress.GlobalRouterConfig, - rpcTimeout time.Duration, - streamWriteTimeout time.Duration, -) (*QUICConnection, error) { - udpConn, err := createUDPConnForConnIndex(connIndex, localAddr, logger) +) (quic.Connection, error) { + udpConn, err := createUDPConnForConnIndex(connIndex, localAddr, edgeAddr, logger) if err != nil { return nil, err } - session, err := quic.Dial(ctx, udpConn, edgeAddr, tlsConfig, quicConfig) + conn, err := quic.Dial(ctx, udpConn, net.UDPAddrFromAddrPort(edgeAddr), tlsConfig, quicConfig) if err != nil { // close the udp server socket in case of error connecting to the edge udpConn.Close() @@ -100,510 +40,22 @@ func NewQUICConnection( } // wrap the session, so that the UDPConn is closed after session is closed. 
- session = &wrapCloseableConnQuicConnection{ - session, + conn = &wrapCloseableConnQuicConnection{ + conn, udpConn, } - - sessionDemuxChan := make(chan *packet.Session, demuxChanCapacity) - datagramMuxer := cfdquic.NewDatagramMuxerV2(session, logger, sessionDemuxChan) - sessionManager := datagramsession.NewManager(logger, datagramMuxer.SendToSession, sessionDemuxChan) - packetRouter := ingress.NewPacketRouter(packetRouterConfig, datagramMuxer, logger) - - return &QUICConnection{ - session: session, - orchestrator: orchestrator, - logger: logger, - sessionManager: sessionManager, - datagramMuxer: datagramMuxer, - packetRouter: packetRouter, - controlStreamHandler: controlStreamHandler, - connOptions: connOptions, - connIndex: connIndex, - rpcTimeout: rpcTimeout, - streamWriteTimeout: streamWriteTimeout, - }, nil + return conn, nil } -// Serve starts a QUIC session that begins accepting streams. -func (q *QUICConnection) Serve(ctx context.Context) error { - // origintunneld assumes the first stream is used for the control plane - controlStream, err := q.session.OpenStream() - if err != nil { - return fmt.Errorf("failed to open a registration control stream: %w", err) - } - - // If either goroutine returns nil error, we rely on this cancellation to make sure the other goroutine exits - // as fast as possible as well. Nil error means we want to exit for good (caller code won't retry serving this - // connection). - // If either goroutine returns a non nil error, then the error group cancels the context, thus also canceling the - // other goroutine as fast as possible. - ctx, cancel := context.WithCancel(ctx) - errGroup, ctx := errgroup.WithContext(ctx) - - // In the future, if cloudflared can autonomously push traffic to the edge, we have to make sure the control - // stream is already fully registered before the other goroutines can proceed. 
- errGroup.Go(func() error { - defer cancel() - return q.serveControlStream(ctx, controlStream) - }) - errGroup.Go(func() error { - defer cancel() - return q.acceptStream(ctx) - }) - errGroup.Go(func() error { - defer cancel() - return q.sessionManager.Serve(ctx) - }) - errGroup.Go(func() error { - defer cancel() - return q.datagramMuxer.ServeReceive(ctx) - }) - errGroup.Go(func() error { - defer cancel() - return q.packetRouter.Serve(ctx) - }) - - return errGroup.Wait() -} - -func (q *QUICConnection) serveControlStream(ctx context.Context, controlStream quic.Stream) error { - // This blocks until the control plane is done. - err := q.controlStreamHandler.ServeControlStream(ctx, controlStream, q.connOptions, q.orchestrator) - if err != nil { - // Not wrapping error here to be consistent with the http2 message. - return err - } - - return nil -} - -// Close closes the session with no errors specified. -func (q *QUICConnection) Close() { - q.session.CloseWithError(0, "") -} - -func (q *QUICConnection) acceptStream(ctx context.Context) error { - defer q.Close() - for { - quicStream, err := q.session.AcceptStream(ctx) - if err != nil { - // context.Canceled is usually a user ctrl+c. We don't want to log an error here as it's intentional. - if errors.Is(err, context.Canceled) || q.controlStreamHandler.IsStopped() { - return nil - } - return fmt.Errorf("failed to accept QUIC stream: %w", err) - } - go q.runStream(quicStream) - } -} - -func (q *QUICConnection) runStream(quicStream quic.Stream) { - ctx := quicStream.Context() - stream := cfdquic.NewSafeStreamCloser(quicStream, q.streamWriteTimeout, q.logger) - defer stream.Close() - - // we are going to fuse readers/writers from stream <- cloudflared -> origin, and we want to guarantee that - // code executed in the code path of handleStream don't trigger an earlier close to the downstream write stream. - // So, we wrap the stream with a no-op write closer and only this method can actually close write side of the stream. 
- // A call to close will simulate a close to the read-side, which will fail subsequent reads. - noCloseStream := &nopCloserReadWriter{ReadWriteCloser: stream} - ss := rpcquic.NewCloudflaredServer(q.handleDataStream, q, q, q.rpcTimeout) - if err := ss.Serve(ctx, noCloseStream); err != nil { - q.logger.Debug().Err(err).Msg("Failed to handle QUIC stream") - - // if we received an error at this level, then close write side of stream with an error, which will result in - // RST_STREAM frame. - quicStream.CancelWrite(0) - } -} - -func (q *QUICConnection) handleDataStream(ctx context.Context, stream *rpcquic.RequestServerStream) error { - request, err := stream.ReadConnectRequestData() - if err != nil { - return err - } - - if err, connectResponseSent := q.dispatchRequest(ctx, stream, err, request); err != nil { - q.logger.Err(err).Str("type", request.Type.String()).Str("dest", request.Dest).Msg("Request failed") - - // if the connectResponse was already sent and we had an error, we need to propagate it up, so that the stream is - // closed with an RST_STREAM frame - if connectResponseSent { - return err - } - - if writeRespErr := stream.WriteConnectResponseData(err); writeRespErr != nil { - return writeRespErr - } - } - - return nil -} - -// dispatchRequest will dispatch the request depending on the type and returns an error if it occurs. -// More importantly, it also tells if the during processing of the request the ConnectResponse metadata was sent downstream. 
-// This is important since it informs -func (q *QUICConnection) dispatchRequest(ctx context.Context, stream *rpcquic.RequestServerStream, err error, request *pogs.ConnectRequest) (error, bool) { - originProxy, err := q.orchestrator.GetOriginProxy() - if err != nil { - return err, false - } - - switch request.Type { - case pogs.ConnectionTypeHTTP, pogs.ConnectionTypeWebsocket: - tracedReq, err := buildHTTPRequest(ctx, request, stream, q.connIndex, q.logger) - if err != nil { - return err, false - } - w := newHTTPResponseAdapter(stream) - return originProxy.ProxyHTTP(&w, tracedReq, request.Type == pogs.ConnectionTypeWebsocket), w.connectResponseSent - - case pogs.ConnectionTypeTCP: - rwa := &streamReadWriteAcker{RequestServerStream: stream} - metadata := request.MetadataMap() - return originProxy.ProxyTCP(ctx, rwa, &TCPRequest{ - Dest: request.Dest, - FlowID: metadata[QUICMetadataFlowID], - CfTraceID: metadata[tracing.TracerContextName], - ConnIndex: q.connIndex, - }), rwa.connectResponseSent - default: - return errors.Errorf("unsupported error type: %s", request.Type), false - } -} - -// RegisterUdpSession is the RPC method invoked by edge to register and run a session -func (q *QUICConnection) RegisterUdpSession(ctx context.Context, sessionID uuid.UUID, dstIP net.IP, dstPort uint16, closeAfterIdleHint time.Duration, traceContext string) (*tunnelpogs.RegisterUdpSessionResponse, error) { - traceCtx := tracing.NewTracedContext(ctx, traceContext, q.logger) - ctx, registerSpan := traceCtx.Tracer().Start(traceCtx, "register-session", trace.WithAttributes( - attribute.String("session-id", sessionID.String()), - attribute.String("dst", fmt.Sprintf("%s:%d", dstIP, dstPort)), - )) - log := q.logger.With().Int(management.EventTypeKey, int(management.UDP)).Logger() - // Each session is a series of datagram from an eyeball to a dstIP:dstPort. - // (src port, dst IP, dst port) uniquely identifies a session, so it needs a dedicated connected socket. 
- originProxy, err := ingress.DialUDP(dstIP, dstPort) - if err != nil { - log.Err(err).Msgf("Failed to create udp proxy to %s:%d", dstIP, dstPort) - tracing.EndWithErrorStatus(registerSpan, err) - return nil, err - } - registerSpan.SetAttributes( - attribute.Bool("socket-bind-success", true), - attribute.String("src", originProxy.LocalAddr().String()), - ) - - session, err := q.sessionManager.RegisterSession(ctx, sessionID, originProxy) - if err != nil { - originProxy.Close() - log.Err(err).Str("sessionID", sessionID.String()).Msgf("Failed to register udp session") - tracing.EndWithErrorStatus(registerSpan, err) - return nil, err - } - - go q.serveUDPSession(session, closeAfterIdleHint) - - log.Debug(). - Str("sessionID", sessionID.String()). - Str("src", originProxy.LocalAddr().String()). - Str("dst", fmt.Sprintf("%s:%d", dstIP, dstPort)). - Msgf("Registered session") - tracing.End(registerSpan) - - resp := tunnelpogs.RegisterUdpSessionResponse{ - Spans: traceCtx.GetProtoSpans(), - } - - return &resp, nil -} - -func (q *QUICConnection) serveUDPSession(session *datagramsession.Session, closeAfterIdleHint time.Duration) { - ctx := q.session.Context() - closedByRemote, err := session.Serve(ctx, closeAfterIdleHint) - // If session is terminated by remote, then we know it has been unregistered from session manager and edge - if !closedByRemote { - if err != nil { - q.closeUDPSession(ctx, session.ID, err.Error()) - } else { - q.closeUDPSession(ctx, session.ID, "terminated without error") - } - } - q.logger.Debug().Err(err). - Int(management.EventTypeKey, int(management.UDP)). - Str("sessionID", session.ID.String()). 
- Msg("Session terminated") -} - -// closeUDPSession first unregisters the session from session manager, then it tries to unregister from edge -func (q *QUICConnection) closeUDPSession(ctx context.Context, sessionID uuid.UUID, message string) { - q.sessionManager.UnregisterSession(ctx, sessionID, message, false) - quicStream, err := q.session.OpenStream() - if err != nil { - // Log this at debug because this is not an error if session was closed due to lost connection - // with edge - q.logger.Debug().Err(err). - Int(management.EventTypeKey, int(management.UDP)). - Str("sessionID", sessionID.String()). - Msgf("Failed to open quic stream to unregister udp session with edge") - return - } - - stream := cfdquic.NewSafeStreamCloser(quicStream, q.streamWriteTimeout, q.logger) - defer stream.Close() - rpcClientStream, err := rpcquic.NewSessionClient(ctx, stream, q.rpcTimeout) - if err != nil { - // Log this at debug because this is not an error if session was closed due to lost connection - // with edge - q.logger.Err(err).Str("sessionID", sessionID.String()). - Msgf("Failed to open rpc stream to unregister udp session with edge") - return - } - defer rpcClientStream.Close() - - if err := rpcClientStream.UnregisterUdpSession(ctx, sessionID, message); err != nil { - q.logger.Err(err).Str("sessionID", sessionID.String()). 
- Msgf("Failed to unregister udp session with edge") - } -} - -// UnregisterUdpSession is the RPC method invoked by edge to unregister and terminate a sesssion -func (q *QUICConnection) UnregisterUdpSession(ctx context.Context, sessionID uuid.UUID, message string) error { - return q.sessionManager.UnregisterSession(ctx, sessionID, message, true) -} - -// UpdateConfiguration is the RPC method invoked by edge when there is a new configuration -func (q *QUICConnection) UpdateConfiguration(ctx context.Context, version int32, config []byte) *tunnelpogs.UpdateConfigurationResponse { - return q.orchestrator.UpdateConfig(version, config) -} - -// streamReadWriteAcker is a light wrapper over QUIC streams with a callback to send response back to -// the client. -type streamReadWriteAcker struct { - *rpcquic.RequestServerStream - connectResponseSent bool -} - -// AckConnection acks response back to the proxy. -func (s *streamReadWriteAcker) AckConnection(tracePropagation string) error { - metadata := []pogs.Metadata{} - // Only add tracing if provided by origintunneld - if tracePropagation != "" { - metadata = append(metadata, pogs.Metadata{ - Key: tracing.CanonicalCloudflaredTracingHeader, - Val: tracePropagation, - }) - } - s.connectResponseSent = true - return s.WriteConnectResponseData(nil, metadata...) -} - -// httpResponseAdapter translates responses written by the HTTP Proxy into ones that can be used in QUIC. 
-type httpResponseAdapter struct { - *rpcquic.RequestServerStream - headers http.Header - connectResponseSent bool -} - -func newHTTPResponseAdapter(s *rpcquic.RequestServerStream) httpResponseAdapter { - return httpResponseAdapter{RequestServerStream: s, headers: make(http.Header)} -} - -func (hrw *httpResponseAdapter) AddTrailer(trailerName, trailerValue string) { - // we do not support trailers over QUIC -} - -func (hrw *httpResponseAdapter) WriteRespHeaders(status int, header http.Header) error { - metadata := make([]pogs.Metadata, 0) - metadata = append(metadata, pogs.Metadata{Key: "HttpStatus", Val: strconv.Itoa(status)}) - for k, vv := range header { - for _, v := range vv { - httpHeaderKey := fmt.Sprintf("%s:%s", HTTPHeaderKey, k) - metadata = append(metadata, pogs.Metadata{Key: httpHeaderKey, Val: v}) - } - } - - return hrw.WriteConnectResponseData(nil, metadata...) -} - -func (hrw *httpResponseAdapter) Write(p []byte) (int, error) { - // Make sure to send WriteHeader response if not called yet - if !hrw.connectResponseSent { - hrw.WriteRespHeaders(http.StatusOK, hrw.headers) - } - return hrw.RequestServerStream.Write(p) -} - -func (hrw *httpResponseAdapter) Header() http.Header { - return hrw.headers -} - -// This is a no-op Flush because this adapter is over a quic.Stream and we don't need Flush here. 
-func (hrw *httpResponseAdapter) Flush() {} - -func (hrw *httpResponseAdapter) WriteHeader(status int) { - hrw.WriteRespHeaders(status, hrw.headers) -} - -func (hrw *httpResponseAdapter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - conn := &localProxyConnection{hrw.ReadWriteCloser} - readWriter := bufio.NewReadWriter( - bufio.NewReader(hrw.ReadWriteCloser), - bufio.NewWriter(hrw.ReadWriteCloser), - ) - return conn, readWriter, nil -} - -func (hrw *httpResponseAdapter) WriteErrorResponse(err error) { - hrw.WriteConnectResponseData(err, pogs.Metadata{Key: "HttpStatus", Val: strconv.Itoa(http.StatusBadGateway)}) -} - -func (hrw *httpResponseAdapter) WriteConnectResponseData(respErr error, metadata ...pogs.Metadata) error { - hrw.connectResponseSent = true - return hrw.RequestServerStream.WriteConnectResponseData(respErr, metadata...) -} - -func buildHTTPRequest( - ctx context.Context, - connectRequest *pogs.ConnectRequest, - body io.ReadCloser, - connIndex uint8, - log *zerolog.Logger, -) (*tracing.TracedHTTPRequest, error) { - metadata := connectRequest.MetadataMap() - dest := connectRequest.Dest - method := metadata[HTTPMethodKey] - host := metadata[HTTPHostKey] - isWebsocket := connectRequest.Type == pogs.ConnectionTypeWebsocket - - req, err := http.NewRequestWithContext(ctx, method, dest, body) - if err != nil { - return nil, err - } - - req.Host = host - for _, metadata := range connectRequest.Metadata { - if strings.Contains(metadata.Key, HTTPHeaderKey) { - // metadata.Key is off the format httpHeaderKey: - httpHeaderKey := strings.Split(metadata.Key, ":") - if len(httpHeaderKey) != 2 { - return nil, fmt.Errorf("header Key: %s malformed", metadata.Key) - } - req.Header.Add(httpHeaderKey[1], metadata.Val) - } - } - // Go's http.Client automatically sends chunked request body if this value is not set on the - // *http.Request struct regardless of header: - // https://go.googlesource.com/go/+/go1.8rc2/src/net/http/transfer.go#154. 
- if err := setContentLength(req); err != nil { - return nil, fmt.Errorf("Error setting content-length: %w", err) - } - - // Go's client defaults to chunked encoding after a 200ms delay if the following cases are true: - // * the request body blocks - // * the content length is not set (or set to -1) - // * the method doesn't usually have a body (GET, HEAD, DELETE, ...) - // * there is no transfer-encoding=chunked already set. - // So, if transfer cannot be chunked and content length is 0, we dont set a request body. - if !isWebsocket && !isTransferEncodingChunked(req) && req.ContentLength == 0 { - req.Body = http.NoBody - } - stripWebsocketUpgradeHeader(req) - - // Check for tracing on request - tracedReq := tracing.NewTracedHTTPRequest(req, connIndex, log) - return tracedReq, err -} - -func setContentLength(req *http.Request) error { - var err error - if contentLengthStr := req.Header.Get("Content-Length"); contentLengthStr != "" { - req.ContentLength, err = strconv.ParseInt(contentLengthStr, 10, 64) - } - return err -} - -func isTransferEncodingChunked(req *http.Request) bool { - transferEncodingVal := req.Header.Get("Transfer-Encoding") - // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Transfer-Encoding suggests that this can be a comma - // separated value as well. - return strings.Contains(strings.ToLower(transferEncodingVal), "chunked") -} - -// A helper struct that guarantees a call to close only affects read side, but not write side. -type nopCloserReadWriter struct { - io.ReadWriteCloser - - // for use by Read only - // we don't need a memory barrier here because there is an implicit assumption that - // Read calls can't happen concurrently by different go-routines. - sawEOF bool - // should be updated and read using atomic primitives. - // value is read in Read method and written in Close method, which could be done by different - // go-routines. 
- closed uint32 -} - -func (np *nopCloserReadWriter) Read(p []byte) (n int, err error) { - if np.sawEOF { - return 0, io.EOF - } - - if atomic.LoadUint32(&np.closed) > 0 { - return 0, fmt.Errorf("closed by handler") - } - - n, err = np.ReadWriteCloser.Read(p) - if err == io.EOF { - np.sawEOF = true - } - - return -} - -func (np *nopCloserReadWriter) Close() error { - atomic.StoreUint32(&np.closed, 1) - - return nil -} - -// muxerWrapper wraps DatagramMuxerV2 to satisfy the packet.FunnelUniPipe interface -type muxerWrapper struct { - muxer *cfdquic.DatagramMuxerV2 -} - -func (rp *muxerWrapper) SendPacket(dst netip.Addr, pk packet.RawPacket) error { - return rp.muxer.SendPacket(cfdquic.RawPacket(pk)) -} - -func (rp *muxerWrapper) ReceivePacket(ctx context.Context) (packet.RawPacket, error) { - pk, err := rp.muxer.ReceivePacket(ctx) - if err != nil { - return packet.RawPacket{}, err - } - rawPacket, ok := pk.(cfdquic.RawPacket) - if ok { - return packet.RawPacket(rawPacket), nil - } - return packet.RawPacket{}, fmt.Errorf("unexpected packet type %+v", pk) -} - -func (rp *muxerWrapper) Close() error { - return nil -} - -func createUDPConnForConnIndex(connIndex uint8, localIP net.IP, logger *zerolog.Logger) (*net.UDPConn, error) { +func createUDPConnForConnIndex(connIndex uint8, localIP net.IP, edgeIP netip.AddrPort, logger *zerolog.Logger) (*net.UDPConn, error) { portMapMutex.Lock() defer portMapMutex.Unlock() - if localIP == nil { - localIP = net.IPv4zero - } - listenNetwork := "udp" - // https://github.com/quic-go/quic-go/issues/3793 DF bit cannot be set for dual stack listener on OSX + // https://github.com/quic-go/quic-go/issues/3793 DF bit cannot be set for dual stack listener ("udp") on macOS, + // to set the DF bit properly, the network string needs to be specific to the IP family. 
if runtime.GOOS == "darwin" { - if localIP.To4() != nil { + if edgeIP.Addr().Is4() { listenNetwork = "udp4" } else { listenNetwork = "udp6" diff --git a/connection/quic_connection.go b/connection/quic_connection.go new file mode 100644 index 00000000..7a20e15a --- /dev/null +++ b/connection/quic_connection.go @@ -0,0 +1,417 @@ +package connection + +import ( + "bufio" + "context" + "fmt" + "io" + "net" + "net/http" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/pkg/errors" + "github.com/quic-go/quic-go" + "github.com/rs/zerolog" + "golang.org/x/sync/errgroup" + + cfdquic "github.com/cloudflare/cloudflared/quic" + "github.com/cloudflare/cloudflared/tracing" + "github.com/cloudflare/cloudflared/tunnelrpc/pogs" + tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" + rpcquic "github.com/cloudflare/cloudflared/tunnelrpc/quic" +) + +const ( + // HTTPHeaderKey is used to get or set http headers in QUIC ALPN if the underlying proxy connection type is HTTP. + HTTPHeaderKey = "HttpHeader" + // HTTPMethodKey is used to get or set http method in QUIC ALPN if the underlying proxy connection type is HTTP. + HTTPMethodKey = "HttpMethod" + // HTTPHostKey is used to get or set http host in QUIC ALPN if the underlying proxy connection type is HTTP. + HTTPHostKey = "HttpHost" + + QUICMetadataFlowID = "FlowID" +) + +// quicConnection represents the type that facilitates Proxying via QUIC streams. +type quicConnection struct { + conn quic.Connection + logger *zerolog.Logger + orchestrator Orchestrator + datagramHandler DatagramSessionHandler + controlStreamHandler ControlStreamHandler + connOptions *tunnelpogs.ConnectionOptions + connIndex uint8 + + rpcTimeout time.Duration + streamWriteTimeout time.Duration + gracePeriod time.Duration +} + +// NewTunnelConnection takes a [quic.Connection] to wrap it for use with cloudflared application logic. 
+func NewTunnelConnection( + ctx context.Context, + conn quic.Connection, + connIndex uint8, + orchestrator Orchestrator, + datagramSessionHandler DatagramSessionHandler, + controlStreamHandler ControlStreamHandler, + connOptions *pogs.ConnectionOptions, + rpcTimeout time.Duration, + streamWriteTimeout time.Duration, + gracePeriod time.Duration, + logger *zerolog.Logger, +) (TunnelConnection, error) { + return &quicConnection{ + conn: conn, + logger: logger, + orchestrator: orchestrator, + datagramHandler: datagramSessionHandler, + controlStreamHandler: controlStreamHandler, + connOptions: connOptions, + connIndex: connIndex, + rpcTimeout: rpcTimeout, + streamWriteTimeout: streamWriteTimeout, + gracePeriod: gracePeriod, + }, nil +} + +// Serve starts a QUIC connection that begins accepting streams. +func (q *quicConnection) Serve(ctx context.Context) error { + // The edge assumes the first stream is used for the control plane + controlStream, err := q.conn.OpenStream() + if err != nil { + return fmt.Errorf("failed to open a registration control stream: %w", err) + } + + // If either goroutine returns nil error, we rely on this cancellation to make sure the other goroutine exits + // as fast as possible as well. Nil error means we want to exit for good (caller code won't retry serving this + // connection). + // If either goroutine returns a non nil error, then the error group cancels the context, thus also canceling the + // other goroutine as fast as possible. + ctx, cancel := context.WithCancel(ctx) + errGroup, ctx := errgroup.WithContext(ctx) + + // In the future, if cloudflared can autonomously push traffic to the edge, we have to make sure the control + // stream is already fully registered before the other goroutines can proceed. + errGroup.Go(func() error { + // err is equal to nil if we exit due to unregistration. 
If that happens we want to wait the full + // amount of the grace period, allowing requests to finish before we cancel the context, which will + // make cloudflared exit. + if err := q.serveControlStream(ctx, controlStream); err == nil { + select { + case <-ctx.Done(): + case <-time.Tick(q.gracePeriod): + } + } + cancel() + return err + + }) + errGroup.Go(func() error { + defer cancel() + return q.acceptStream(ctx) + }) + errGroup.Go(func() error { + defer cancel() + return q.datagramHandler.Serve(ctx) + }) + + return errGroup.Wait() +} + +// serveControlStream will serve the RPC; blocking until the control plane is done. +func (q *quicConnection) serveControlStream(ctx context.Context, controlStream quic.Stream) error { + return q.controlStreamHandler.ServeControlStream(ctx, controlStream, q.connOptions, q.orchestrator) +} + +// Close the connection with no errors specified. +func (q *quicConnection) Close() { + q.conn.CloseWithError(0, "") +} + +func (q *quicConnection) acceptStream(ctx context.Context) error { + defer q.Close() + for { + quicStream, err := q.conn.AcceptStream(ctx) + if err != nil { + // context.Canceled is usually a user ctrl+c. We don't want to log an error here as it's intentional. + if errors.Is(err, context.Canceled) || q.controlStreamHandler.IsStopped() { + return nil + } + return fmt.Errorf("failed to accept QUIC stream: %w", err) + } + go q.runStream(quicStream) + } +} + +func (q *quicConnection) runStream(quicStream quic.Stream) { + ctx := quicStream.Context() + stream := cfdquic.NewSafeStreamCloser(quicStream, q.streamWriteTimeout, q.logger) + defer stream.Close() + + // we are going to fuse readers/writers from stream <- cloudflared -> origin, and we want to guarantee that + // code executed in the code path of handleStream don't trigger an earlier close to the downstream write stream. + // So, we wrap the stream with a no-op write closer and only this method can actually close write side of the stream. 
+ // A call to close will simulate a close to the read-side, which will fail subsequent reads. + noCloseStream := &nopCloserReadWriter{ReadWriteCloser: stream} + ss := rpcquic.NewCloudflaredServer(q.handleDataStream, q.datagramHandler, q, q.rpcTimeout) + if err := ss.Serve(ctx, noCloseStream); err != nil { + q.logger.Debug().Err(err).Msg("Failed to handle QUIC stream") + + // if we received an error at this level, then close write side of stream with an error, which will result in + // RST_STREAM frame. + quicStream.CancelWrite(0) + } +} + +func (q *quicConnection) handleDataStream(ctx context.Context, stream *rpcquic.RequestServerStream) error { + request, err := stream.ReadConnectRequestData() + if err != nil { + return err + } + + if err, connectResponseSent := q.dispatchRequest(ctx, stream, request); err != nil { + q.logger.Err(err).Str("type", request.Type.String()).Str("dest", request.Dest).Msg("Request failed") + + // if the connectResponse was already sent and we had an error, we need to propagate it up, so that the stream is + // closed with an RST_STREAM frame + if connectResponseSent { + return err + } + + if writeRespErr := stream.WriteConnectResponseData(err); writeRespErr != nil { + return writeRespErr + } + } + + return nil +} + +// dispatchRequest will dispatch the request to the origin depending on the type and returns an error if it occurs. +// Also returns if the connect response was sent to the downstream during processing of the origin request. 
+func (q *quicConnection) dispatchRequest(ctx context.Context, stream *rpcquic.RequestServerStream, request *pogs.ConnectRequest) (err error, connectResponseSent bool) { + originProxy, err := q.orchestrator.GetOriginProxy() + if err != nil { + return err, false + } + + switch request.Type { + case pogs.ConnectionTypeHTTP, pogs.ConnectionTypeWebsocket: + tracedReq, err := buildHTTPRequest(ctx, request, stream, q.connIndex, q.logger) + if err != nil { + return err, false + } + w := newHTTPResponseAdapter(stream) + return originProxy.ProxyHTTP(&w, tracedReq, request.Type == pogs.ConnectionTypeWebsocket), w.connectResponseSent + + case pogs.ConnectionTypeTCP: + rwa := &streamReadWriteAcker{RequestServerStream: stream} + metadata := request.MetadataMap() + return originProxy.ProxyTCP(ctx, rwa, &TCPRequest{ + Dest: request.Dest, + FlowID: metadata[QUICMetadataFlowID], + CfTraceID: metadata[tracing.TracerContextName], + ConnIndex: q.connIndex, + }), rwa.connectResponseSent + default: + return errors.Errorf("unsupported error type: %s", request.Type), false + } +} + +// UpdateConfiguration is the RPC method invoked by edge when there is a new configuration +func (q *quicConnection) UpdateConfiguration(ctx context.Context, version int32, config []byte) *tunnelpogs.UpdateConfigurationResponse { + return q.orchestrator.UpdateConfig(version, config) +} + +// streamReadWriteAcker is a light wrapper over QUIC streams with a callback to send response back to +// the client. +type streamReadWriteAcker struct { + *rpcquic.RequestServerStream + connectResponseSent bool +} + +// AckConnection acks response back to the proxy. 
+func (s *streamReadWriteAcker) AckConnection(tracePropagation string) error { + metadata := []pogs.Metadata{} + // Only add tracing if provided by the edge request + if tracePropagation != "" { + metadata = append(metadata, pogs.Metadata{ + Key: tracing.CanonicalCloudflaredTracingHeader, + Val: tracePropagation, + }) + } + s.connectResponseSent = true + return s.WriteConnectResponseData(nil, metadata...) +} + +// httpResponseAdapter translates responses written by the HTTP Proxy into ones that can be used in QUIC. +type httpResponseAdapter struct { + *rpcquic.RequestServerStream + headers http.Header + connectResponseSent bool +} + +func newHTTPResponseAdapter(s *rpcquic.RequestServerStream) httpResponseAdapter { + return httpResponseAdapter{RequestServerStream: s, headers: make(http.Header)} +} + +func (hrw *httpResponseAdapter) AddTrailer(trailerName, trailerValue string) { + // we do not support trailers over QUIC +} + +func (hrw *httpResponseAdapter) WriteRespHeaders(status int, header http.Header) error { + metadata := make([]pogs.Metadata, 0) + metadata = append(metadata, pogs.Metadata{Key: "HttpStatus", Val: strconv.Itoa(status)}) + for k, vv := range header { + for _, v := range vv { + httpHeaderKey := fmt.Sprintf("%s:%s", HTTPHeaderKey, k) + metadata = append(metadata, pogs.Metadata{Key: httpHeaderKey, Val: v}) + } + } + + return hrw.WriteConnectResponseData(nil, metadata...) +} + +func (hrw *httpResponseAdapter) Write(p []byte) (int, error) { + // Make sure to send WriteHeader response if not called yet + if !hrw.connectResponseSent { + hrw.WriteRespHeaders(http.StatusOK, hrw.headers) + } + return hrw.RequestServerStream.Write(p) +} + +func (hrw *httpResponseAdapter) Header() http.Header { + return hrw.headers +} + +// This is a no-op Flush because this adapter is over a quic.Stream and we don't need Flush here. 
+func (hrw *httpResponseAdapter) Flush() {} + +func (hrw *httpResponseAdapter) WriteHeader(status int) { + hrw.WriteRespHeaders(status, hrw.headers) +} + +func (hrw *httpResponseAdapter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + conn := &localProxyConnection{hrw.ReadWriteCloser} + readWriter := bufio.NewReadWriter( + bufio.NewReader(hrw.ReadWriteCloser), + bufio.NewWriter(hrw.ReadWriteCloser), + ) + return conn, readWriter, nil +} + +func (hrw *httpResponseAdapter) WriteErrorResponse(err error) { + hrw.WriteConnectResponseData(err, pogs.Metadata{Key: "HttpStatus", Val: strconv.Itoa(http.StatusBadGateway)}) +} + +func (hrw *httpResponseAdapter) WriteConnectResponseData(respErr error, metadata ...pogs.Metadata) error { + hrw.connectResponseSent = true + return hrw.RequestServerStream.WriteConnectResponseData(respErr, metadata...) +} + +func buildHTTPRequest( + ctx context.Context, + connectRequest *pogs.ConnectRequest, + body io.ReadCloser, + connIndex uint8, + log *zerolog.Logger, +) (*tracing.TracedHTTPRequest, error) { + metadata := connectRequest.MetadataMap() + dest := connectRequest.Dest + method := metadata[HTTPMethodKey] + host := metadata[HTTPHostKey] + isWebsocket := connectRequest.Type == pogs.ConnectionTypeWebsocket + + req, err := http.NewRequestWithContext(ctx, method, dest, body) + if err != nil { + return nil, err + } + + req.Host = host + for _, metadata := range connectRequest.Metadata { + if strings.Contains(metadata.Key, HTTPHeaderKey) { + // metadata.Key is off the format httpHeaderKey: + httpHeaderKey := strings.Split(metadata.Key, ":") + if len(httpHeaderKey) != 2 { + return nil, fmt.Errorf("header Key: %s malformed", metadata.Key) + } + req.Header.Add(httpHeaderKey[1], metadata.Val) + } + } + // Go's http.Client automatically sends chunked request body if this value is not set on the + // *http.Request struct regardless of header: + // https://go.googlesource.com/go/+/go1.8rc2/src/net/http/transfer.go#154. 
+ if err := setContentLength(req); err != nil { + return nil, fmt.Errorf("Error setting content-length: %w", err) + } + + // Go's client defaults to chunked encoding after a 200ms delay if the following cases are true: + // * the request body blocks + // * the content length is not set (or set to -1) + // * the method doesn't usually have a body (GET, HEAD, DELETE, ...) + // * there is no transfer-encoding=chunked already set. + // So, if transfer cannot be chunked and content length is 0, we dont set a request body. + if !isWebsocket && !isTransferEncodingChunked(req) && req.ContentLength == 0 { + req.Body = http.NoBody + } + stripWebsocketUpgradeHeader(req) + + // Check for tracing on request + tracedReq := tracing.NewTracedHTTPRequest(req, connIndex, log) + return tracedReq, err +} + +func setContentLength(req *http.Request) error { + var err error + if contentLengthStr := req.Header.Get("Content-Length"); contentLengthStr != "" { + req.ContentLength, err = strconv.ParseInt(contentLengthStr, 10, 64) + } + return err +} + +func isTransferEncodingChunked(req *http.Request) bool { + transferEncodingVal := req.Header.Get("Transfer-Encoding") + // https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Transfer-Encoding suggests that this can be a comma + // separated value as well. + return strings.Contains(strings.ToLower(transferEncodingVal), "chunked") +} + +// A helper struct that guarantees a call to close only affects read side, but not write side. +type nopCloserReadWriter struct { + io.ReadWriteCloser + + // for use by Read only + // we don't need a memory barrier here because there is an implicit assumption that + // Read calls can't happen concurrently by different go-routines. + sawEOF bool + // should be updated and read using atomic primitives. + // value is read in Read method and written in Close method, which could be done by different + // go-routines. 
+ closed uint32 +} + +func (np *nopCloserReadWriter) Read(p []byte) (n int, err error) { + if np.sawEOF { + return 0, io.EOF + } + + if atomic.LoadUint32(&np.closed) > 0 { + return 0, fmt.Errorf("closed by handler") + } + + n, err = np.ReadWriteCloser.Read(p) + if err == io.EOF { + np.sawEOF = true + } + + return +} + +func (np *nopCloserReadWriter) Close() error { + atomic.StoreUint32(&np.closed, 1) + + return nil +} diff --git a/connection/quic_test.go b/connection/quic_connection_test.go similarity index 87% rename from connection/quic_test.go rename to connection/quic_connection_test.go index c81d53fb..1c22605b 100644 --- a/connection/quic_test.go +++ b/connection/quic_connection_test.go @@ -13,8 +13,8 @@ import ( "math/big" "net" "net/http" + "net/netip" "net/url" - "os" "strings" "testing" "time" @@ -26,12 +26,14 @@ import ( "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "golang.org/x/net/nettest" "github.com/cloudflare/cloudflared/datagramsession" + "github.com/cloudflare/cloudflared/ingress" + "github.com/cloudflare/cloudflared/packet" cfdquic "github.com/cloudflare/cloudflared/quic" "github.com/cloudflare/cloudflared/tracing" "github.com/cloudflare/cloudflared/tunnelrpc/pogs" - tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" rpcquic "github.com/cloudflare/cloudflared/tunnelrpc/quic" ) @@ -162,11 +164,11 @@ func TestQUICServer(t *testing.T) { close(serverDone) }() - qc := testQUICConnection(udpListener.LocalAddr(), t, uint8(i)) + tunnelConn, _ := testTunnelConnection(t, netip.MustParseAddrPort(udpListener.LocalAddr().String()), uint8(i)) connDone := make(chan struct{}) go func() { - qc.Serve(ctx) + tunnelConn.Serve(ctx) close(connDone) }() @@ -513,7 +515,6 @@ func TestServeUDPSession(t *testing.T) { defer udpListener.Close() ctx, cancel := context.WithCancel(context.Background()) - val := udpListener.LocalAddr() // Establish QUIC connection with edge edgeQUICSessionChan := make(chan 
quic.Connection) @@ -527,13 +528,14 @@ func TestServeUDPSession(t *testing.T) { }() // Random index to avoid reusing port - qc := testQUICConnection(val, t, 28) - go qc.Serve(ctx) + tunnelConn, datagramConn := testTunnelConnection(t, netip.MustParseAddrPort(udpListener.LocalAddr().String()), 28) + go tunnelConn.Serve(ctx) edgeQUICSession := <-edgeQUICSessionChan - serveSession(ctx, qc, edgeQUICSession, closedByOrigin, io.EOF.Error(), t) - serveSession(ctx, qc, edgeQUICSession, closedByTimeout, datagramsession.SessionIdleErr(time.Millisecond*50).Error(), t) - serveSession(ctx, qc, edgeQUICSession, closedByRemote, "eyeball closed connection", t) + + serveSession(ctx, datagramConn, edgeQUICSession, closedByOrigin, io.EOF.Error(), t) + serveSession(ctx, datagramConn, edgeQUICSession, closedByTimeout, datagramsession.SessionIdleErr(time.Millisecond*50).Error(), t) + serveSession(ctx, datagramConn, edgeQUICSession, closedByRemote, "eyeball closed connection", t) cancel() } @@ -576,8 +578,20 @@ func TestNopCloserReadWriterCloseAfterEOF(t *testing.T) { } func TestCreateUDPConnReuseSourcePort(t *testing.T) { + edgeIPv4 := netip.MustParseAddrPort("0.0.0.0:0") + edgeIPv6 := netip.MustParseAddrPort("[::]:0") + + // We assume the test environment has access to an IPv4 interface + testCreateUDPConnReuseSourcePortForEdgeIP(t, edgeIPv4) + + if nettest.SupportsIPv6() { + testCreateUDPConnReuseSourcePortForEdgeIP(t, edgeIPv6) + } +} + +func testCreateUDPConnReuseSourcePortForEdgeIP(t *testing.T, edgeIP netip.AddrPort) { logger := zerolog.Nop() - conn, err := createUDPConnForConnIndex(0, nil, &logger) + conn, err := createUDPConnForConnIndex(0, nil, edgeIP, &logger) require.NoError(t, err) getPortFunc := func(conn *net.UDPConn) int { @@ -591,34 +605,34 @@ func TestCreateUDPConnReuseSourcePort(t *testing.T) { conn.Close() // should get the same port as before. 
- conn, err = createUDPConnForConnIndex(0, nil, &logger) + conn, err = createUDPConnForConnIndex(0, nil, edgeIP, &logger) require.NoError(t, err) require.Equal(t, initialPort, getPortFunc(conn)) // new index, should get a different port - conn1, err := createUDPConnForConnIndex(1, nil, &logger) + conn1, err := createUDPConnForConnIndex(1, nil, edgeIP, &logger) require.NoError(t, err) require.NotEqual(t, initialPort, getPortFunc(conn1)) // not closing the conn and trying to obtain a new conn for same index should give a different random port - conn, err = createUDPConnForConnIndex(0, nil, &logger) + conn, err = createUDPConnForConnIndex(0, nil, edgeIP, &logger) require.NoError(t, err) require.NotEqual(t, initialPort, getPortFunc(conn)) } -func serveSession(ctx context.Context, qc *QUICConnection, edgeQUICSession quic.Connection, closeType closeReason, expectedReason string, t *testing.T) { +func serveSession(ctx context.Context, datagramConn *datagramV2Connection, edgeQUICSession quic.Connection, closeType closeReason, expectedReason string, t *testing.T) { var ( payload = []byte(t.Name()) ) sessionID := uuid.New() cfdConn, originConn := net.Pipe() // Registers and run a new session - session, err := qc.sessionManager.RegisterSession(ctx, sessionID, cfdConn) + session, err := datagramConn.sessionManager.RegisterSession(ctx, sessionID, cfdConn) require.NoError(t, err) sessionDone := make(chan struct{}) go func() { - qc.serveUDPSession(session, time.Millisecond*50) + datagramConn.serveUDPSession(session, time.Millisecond*50) close(sessionDone) }() @@ -642,7 +656,7 @@ func serveSession(ctx context.Context, qc *QUICConnection, edgeQUICSession quic. 
case closedByOrigin: originConn.Close() case closedByRemote: - err = qc.UnregisterUdpSession(ctx, sessionID, expectedReason) + err = datagramConn.UnregisterUdpSession(ctx, sessionID, expectedReason) require.NoError(t, err) case closedByTimeout: } @@ -713,32 +727,59 @@ func (s mockSessionRPCServer) UnregisterUdpSession(ctx context.Context, sessionI return nil } -func testQUICConnection(udpListenerAddr net.Addr, t *testing.T, index uint8) *QUICConnection { +func testTunnelConnection(t *testing.T, serverAddr netip.AddrPort, index uint8) (TunnelConnection, *datagramV2Connection) { tlsClientConfig := &tls.Config{ InsecureSkipVerify: true, NextProtos: []string{"argotunnel"}, } // Start a mock httpProxy - log := zerolog.New(os.Stdout) + log := zerolog.New(io.Discard) ctx, cancel := context.WithCancel(context.Background()) defer cancel() - qc, err := NewQUICConnection( + + // Dial the QUIC connection to the edge + conn, err := DialQuic( ctx, testQUICConfig, - udpListenerAddr, - nil, - index, tlsClientConfig, - &mockOrchestrator{originProxy: &mockOriginProxyWithRequest{}}, - &tunnelpogs.ConnectionOptions{}, - fakeControlStream{}, + serverAddr, + nil, // connect on a random port + index, &log, - nil, + ) + + // Start a session manager for the connection + sessionDemuxChan := make(chan *packet.Session, 4) + datagramMuxer := cfdquic.NewDatagramMuxerV2(conn, &log, sessionDemuxChan) + sessionManager := datagramsession.NewManager(&log, datagramMuxer.SendToSession, sessionDemuxChan) + var connIndex uint8 = 0 + packetRouter := ingress.NewPacketRouter(nil, datagramMuxer, connIndex, &log) + + datagramConn := &datagramV2Connection{ + conn, + sessionManager, + datagramMuxer, + packetRouter, + 15 * time.Second, + 0 * time.Second, + &log, + } + + tunnelConn, err := NewTunnelConnection( + ctx, + conn, + index, + &mockOrchestrator{originProxy: &mockOriginProxyWithRequest{}}, + datagramConn, + fakeControlStream{}, + &pogs.ConnectionOptions{}, 15*time.Second, 0*time.Second, + 0*time.Second, 
+ &log, ) require.NoError(t, err) - return qc + return tunnelConn, datagramConn } type mockReaderNoopWriter struct { diff --git a/connection/quic_datagram_v2.go b/connection/quic_datagram_v2.go new file mode 100644 index 00000000..c6b8bc03 --- /dev/null +++ b/connection/quic_datagram_v2.go @@ -0,0 +1,201 @@ +package connection + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/google/uuid" + "github.com/quic-go/quic-go" + "github.com/rs/zerolog" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "golang.org/x/sync/errgroup" + + "github.com/cloudflare/cloudflared/datagramsession" + "github.com/cloudflare/cloudflared/ingress" + "github.com/cloudflare/cloudflared/management" + "github.com/cloudflare/cloudflared/packet" + cfdquic "github.com/cloudflare/cloudflared/quic" + "github.com/cloudflare/cloudflared/tracing" + "github.com/cloudflare/cloudflared/tunnelrpc/pogs" + tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" + rpcquic "github.com/cloudflare/cloudflared/tunnelrpc/quic" +) + +const ( + // emperically this capacity has been working well + demuxChanCapacity = 16 +) + +// DatagramSessionHandler is a service that can serve datagrams for a connection and handle sessions from incoming +// connection streams. +type DatagramSessionHandler interface { + Serve(context.Context) error + + pogs.SessionManager +} + +type datagramV2Connection struct { + conn quic.Connection + + // sessionManager tracks active sessions. 
It receives datagrams from quic connection via datagramMuxer + sessionManager datagramsession.Manager + // datagramMuxer mux/demux datagrams from quic connection + datagramMuxer *cfdquic.DatagramMuxerV2 + packetRouter *ingress.PacketRouter + + rpcTimeout time.Duration + streamWriteTimeout time.Duration + + logger *zerolog.Logger +} + +func NewDatagramV2Connection(ctx context.Context, + conn quic.Connection, + icmpRouter ingress.ICMPRouter, + index uint8, + rpcTimeout time.Duration, + streamWriteTimeout time.Duration, + logger *zerolog.Logger, +) DatagramSessionHandler { + sessionDemuxChan := make(chan *packet.Session, demuxChanCapacity) + datagramMuxer := cfdquic.NewDatagramMuxerV2(conn, logger, sessionDemuxChan) + sessionManager := datagramsession.NewManager(logger, datagramMuxer.SendToSession, sessionDemuxChan) + packetRouter := ingress.NewPacketRouter(icmpRouter, datagramMuxer, index, logger) + + return &datagramV2Connection{ + conn, + sessionManager, + datagramMuxer, + packetRouter, + rpcTimeout, + streamWriteTimeout, + logger, + } +} + +func (d *datagramV2Connection) Serve(ctx context.Context) error { + // If either goroutine returns nil error, we rely on this cancellation to make sure the other goroutine exits + // as fast as possible as well. Nil error means we want to exit for good (caller code won't retry serving this + // connection). + // If either goroutine returns a non nil error, then the error group cancels the context, thus also canceling the + // other goroutine as fast as possible. 
+ ctx, cancel := context.WithCancel(ctx) + errGroup, ctx := errgroup.WithContext(ctx) + + errGroup.Go(func() error { + defer cancel() + return d.sessionManager.Serve(ctx) + }) + errGroup.Go(func() error { + defer cancel() + return d.datagramMuxer.ServeReceive(ctx) + }) + errGroup.Go(func() error { + defer cancel() + return d.packetRouter.Serve(ctx) + }) + + return errGroup.Wait() +} + +// RegisterUdpSession is the RPC method invoked by edge to register and run a session +func (q *datagramV2Connection) RegisterUdpSession(ctx context.Context, sessionID uuid.UUID, dstIP net.IP, dstPort uint16, closeAfterIdleHint time.Duration, traceContext string) (*tunnelpogs.RegisterUdpSessionResponse, error) { + traceCtx := tracing.NewTracedContext(ctx, traceContext, q.logger) + ctx, registerSpan := traceCtx.Tracer().Start(traceCtx, "register-session", trace.WithAttributes( + attribute.String("session-id", sessionID.String()), + attribute.String("dst", fmt.Sprintf("%s:%d", dstIP, dstPort)), + )) + log := q.logger.With().Int(management.EventTypeKey, int(management.UDP)).Logger() + // Each session is a series of datagram from an eyeball to a dstIP:dstPort. + // (src port, dst IP, dst port) uniquely identifies a session, so it needs a dedicated connected socket. 
+ originProxy, err := ingress.DialUDP(dstIP, dstPort) + if err != nil { + log.Err(err).Msgf("Failed to create udp proxy to %s:%d", dstIP, dstPort) + tracing.EndWithErrorStatus(registerSpan, err) + return nil, err + } + registerSpan.SetAttributes( + attribute.Bool("socket-bind-success", true), + attribute.String("src", originProxy.LocalAddr().String()), + ) + + session, err := q.sessionManager.RegisterSession(ctx, sessionID, originProxy) + if err != nil { + originProxy.Close() + log.Err(err).Str(datagramsession.LogFieldSessionID, datagramsession.FormatSessionID(sessionID)).Msgf("Failed to register udp session") + tracing.EndWithErrorStatus(registerSpan, err) + return nil, err + } + + go q.serveUDPSession(session, closeAfterIdleHint) + + log.Debug(). + Str(datagramsession.LogFieldSessionID, datagramsession.FormatSessionID(sessionID)). + Str("src", originProxy.LocalAddr().String()). + Str("dst", fmt.Sprintf("%s:%d", dstIP, dstPort)). + Msgf("Registered session") + tracing.End(registerSpan) + + resp := tunnelpogs.RegisterUdpSessionResponse{ + Spans: traceCtx.GetProtoSpans(), + } + + return &resp, nil +} + +// UnregisterUdpSession is the RPC method invoked by edge to unregister and terminate a sesssion +func (q *datagramV2Connection) UnregisterUdpSession(ctx context.Context, sessionID uuid.UUID, message string) error { + return q.sessionManager.UnregisterSession(ctx, sessionID, message, true) +} + +func (q *datagramV2Connection) serveUDPSession(session *datagramsession.Session, closeAfterIdleHint time.Duration) { + ctx := q.conn.Context() + closedByRemote, err := session.Serve(ctx, closeAfterIdleHint) + // If session is terminated by remote, then we know it has been unregistered from session manager and edge + if !closedByRemote { + if err != nil { + q.closeUDPSession(ctx, session.ID, err.Error()) + } else { + q.closeUDPSession(ctx, session.ID, "terminated without error") + } + } + q.logger.Debug().Err(err). + Int(management.EventTypeKey, int(management.UDP)). 
+ Str(datagramsession.LogFieldSessionID, datagramsession.FormatSessionID(session.ID)). + Msg("Session terminated") +} + +// closeUDPSession first unregisters the session from session manager, then it tries to unregister from edge +func (q *datagramV2Connection) closeUDPSession(ctx context.Context, sessionID uuid.UUID, message string) { + q.sessionManager.UnregisterSession(ctx, sessionID, message, false) + quicStream, err := q.conn.OpenStream() + if err != nil { + // Log this at debug because this is not an error if session was closed due to lost connection + // with edge + q.logger.Debug().Err(err). + Int(management.EventTypeKey, int(management.UDP)). + Str(datagramsession.LogFieldSessionID, datagramsession.FormatSessionID(sessionID)). + Msgf("Failed to open quic stream to unregister udp session with edge") + return + } + + stream := cfdquic.NewSafeStreamCloser(quicStream, q.streamWriteTimeout, q.logger) + defer stream.Close() + rpcClientStream, err := rpcquic.NewSessionClient(ctx, stream, q.rpcTimeout) + if err != nil { + // Log this at debug because this is not an error if session was closed due to lost connection + // with edge + q.logger.Err(err).Str(datagramsession.LogFieldSessionID, datagramsession.FormatSessionID(sessionID)). + Msgf("Failed to open rpc stream to unregister udp session with edge") + return + } + defer rpcClientStream.Close() + + if err := rpcClientStream.UnregisterUdpSession(ctx, sessionID, message); err != nil { + q.logger.Err(err).Str(datagramsession.LogFieldSessionID, datagramsession.FormatSessionID(sessionID)). 
+ Msgf("Failed to unregister udp session with edge") + } +} diff --git a/connection/quic_datagram_v3.go b/connection/quic_datagram_v3.go new file mode 100644 index 00000000..1b42600e --- /dev/null +++ b/connection/quic_datagram_v3.go @@ -0,0 +1,58 @@ +package connection + +import ( + "context" + "fmt" + "net" + "time" + + "github.com/google/uuid" + "github.com/quic-go/quic-go" + "github.com/rs/zerolog" + + "github.com/cloudflare/cloudflared/ingress" + "github.com/cloudflare/cloudflared/management" + cfdquic "github.com/cloudflare/cloudflared/quic/v3" + "github.com/cloudflare/cloudflared/tunnelrpc/pogs" +) + +type datagramV3Connection struct { + conn quic.Connection + // datagramMuxer mux/demux datagrams from quic connection + datagramMuxer cfdquic.DatagramConn + logger *zerolog.Logger +} + +func NewDatagramV3Connection(ctx context.Context, + conn quic.Connection, + sessionManager cfdquic.SessionManager, + icmpRouter ingress.ICMPRouter, + index uint8, + metrics cfdquic.Metrics, + logger *zerolog.Logger, +) DatagramSessionHandler { + log := logger. + With(). + Int(management.EventTypeKey, int(management.UDP)). + Uint8(LogFieldConnIndex, index). 
+ Logger() + datagramMuxer := cfdquic.NewDatagramConn(conn, sessionManager, icmpRouter, index, metrics, &log) + + return &datagramV3Connection{ + conn, + datagramMuxer, + logger, + } +} + +func (d *datagramV3Connection) Serve(ctx context.Context) error { + return d.datagramMuxer.Serve(ctx) +} + +func (d *datagramV3Connection) RegisterUdpSession(ctx context.Context, sessionID uuid.UUID, dstIP net.IP, dstPort uint16, closeAfterIdleHint time.Duration, traceContext string) (*pogs.RegisterUdpSessionResponse, error) { + return nil, fmt.Errorf("datagram v3 does not support RegisterUdpSession RPC") +} + +func (d *datagramV3Connection) UnregisterUdpSession(ctx context.Context, sessionID uuid.UUID, message string) error { + return fmt.Errorf("datagram v3 does not support UnregisterUdpSession RPC") +} diff --git a/datagramsession/manager.go b/datagramsession/manager.go index ae332bf6..f315a0d3 100644 --- a/datagramsession/manager.go +++ b/datagramsession/manager.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "io" + "strings" "time" "github.com/google/uuid" @@ -20,8 +21,15 @@ const ( var ( errSessionManagerClosed = fmt.Errorf("session manager closed") + LogFieldSessionID = "sessionID" ) +func FormatSessionID(sessionID uuid.UUID) string { + sessionIDStr := sessionID.String() + sessionIDStr = strings.ReplaceAll(sessionIDStr, "-", "") + return sessionIDStr +} + // Manager defines the APIs to manage sessions from the same transport. type Manager interface { // Serve starts the event loop @@ -127,7 +135,7 @@ func (m *manager) registerSession(ctx context.Context, registration *registerSes func (m *manager) newSession(id uuid.UUID, dstConn io.ReadWriteCloser) *Session { logger := m.log.With(). Int(management.EventTypeKey, int(management.UDP)). 
- Str("sessionID", id.String()).Logger() + Str(LogFieldSessionID, FormatSessionID(id)).Logger() return &Session{ ID: id, sendFunc: m.sendFunc, @@ -174,7 +182,7 @@ func (m *manager) unregisterSession(unregistration *unregisterSessionEvent) { func (m *manager) sendToSession(datagram *packet.Session) { session, ok := m.sessions[datagram.ID] if !ok { - m.log.Error().Str("sessionID", datagram.ID.String()).Msg("session not found") + m.log.Error().Str(LogFieldSessionID, FormatSessionID(datagram.ID)).Msg("session not found") return } // session writes to destination over a connected UDP socket, which should not be blocking, so this call doesn't diff --git a/dev.Dockerfile b/dev.Dockerfile index e8a8ceba..8986040a 100644 --- a/dev.Dockerfile +++ b/dev.Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22.2 as builder +FROM golang:1.22.5 as builder ENV GO111MODULE=on \ CGO_ENABLED=0 WORKDIR /go/src/github.com/cloudflare/cloudflared/ diff --git a/diagnostic/client.go b/diagnostic/client.go new file mode 100644 index 00000000..6e4dc2d3 --- /dev/null +++ b/diagnostic/client.go @@ -0,0 +1,216 @@ +package diagnostic + +import ( + "context" + "encoding/json" + "fmt" + "io" + "net/http" + "net/url" + "strconv" + + "github.com/cloudflare/cloudflared/logger" +) + +type httpClient struct { + http.Client + baseURL *url.URL +} + +func NewHTTPClient() *httpClient { + httpTransport := http.Transport{ + TLSHandshakeTimeout: defaultTimeout, + ResponseHeaderTimeout: defaultTimeout, + } + + return &httpClient{ + http.Client{ + Transport: &httpTransport, + Timeout: defaultTimeout, + }, + nil, + } +} + +func (client *httpClient) SetBaseURL(baseURL *url.URL) { + client.baseURL = baseURL +} + +func (client *httpClient) GET(ctx context.Context, endpoint string) (*http.Response, error) { + if client.baseURL == nil { + return nil, ErrNoBaseURL + } + url := client.baseURL.JoinPath(endpoint) + + req, err := http.NewRequestWithContext(ctx, http.MethodGet, url.String(), nil) + if err != nil { + return nil, 
fmt.Errorf("error creating GET request: %w", err) + } + + req.Header.Add("Accept", "application/json;version=1") + + response, err := client.Do(req) + if err != nil { + return nil, fmt.Errorf("error GET request: %w", err) + } + + return response, nil +} + +type LogConfiguration struct { + logFile string + logDirectory string + uid int // the uid of the user that started cloudflared +} + +func (client *httpClient) GetLogConfiguration(ctx context.Context) (*LogConfiguration, error) { + response, err := client.GET(ctx, cliConfigurationEndpoint) + if err != nil { + return nil, err + } + + defer response.Body.Close() + + var data map[string]string + if err := json.NewDecoder(response.Body).Decode(&data); err != nil { + return nil, fmt.Errorf("failed to decode body: %w", err) + } + + uidStr, exists := data[configurationKeyUID] + if !exists { + return nil, ErrKeyNotFound + } + + uid, err := strconv.Atoi(uidStr) + if err != nil { + return nil, fmt.Errorf("error convertin pid to int: %w", err) + } + + logFile, exists := data[logger.LogFileFlag] + if exists { + return &LogConfiguration{logFile, "", uid}, nil + } + + logDirectory, exists := data[logger.LogDirectoryFlag] + if exists { + return &LogConfiguration{"", logDirectory, uid}, nil + } + + // No log configured may happen when cloudflared is executed as a managed service or + // when containerized + return &LogConfiguration{"", "", uid}, nil +} + +func (client *httpClient) GetMemoryDump(ctx context.Context, writer io.Writer) error { + response, err := client.GET(ctx, memoryDumpEndpoint) + if err != nil { + return err + } + + return copyToWriter(response, writer) +} + +func (client *httpClient) GetGoroutineDump(ctx context.Context, writer io.Writer) error { + response, err := client.GET(ctx, goroutineDumpEndpoint) + if err != nil { + return err + } + + return copyToWriter(response, writer) +} + +func (client *httpClient) GetTunnelState(ctx context.Context) (*TunnelState, error) { + response, err := client.GET(ctx, 
tunnelStateEndpoint) + if err != nil { + return nil, err + } + + defer response.Body.Close() + + var state TunnelState + if err := json.NewDecoder(response.Body).Decode(&state); err != nil { + return nil, fmt.Errorf("failed to decode body: %w", err) + } + + return &state, nil +} + +func (client *httpClient) GetSystemInformation(ctx context.Context, writer io.Writer) error { + response, err := client.GET(ctx, systemInformationEndpoint) + if err != nil { + return err + } + + return copyJSONToWriter(response, writer) +} + +func (client *httpClient) GetMetrics(ctx context.Context, writer io.Writer) error { + response, err := client.GET(ctx, metricsEndpoint) + if err != nil { + return err + } + + return copyToWriter(response, writer) +} + +func (client *httpClient) GetTunnelConfiguration(ctx context.Context, writer io.Writer) error { + response, err := client.GET(ctx, tunnelConfigurationEndpoint) + if err != nil { + return err + } + + return copyJSONToWriter(response, writer) +} + +func (client *httpClient) GetCliConfiguration(ctx context.Context, writer io.Writer) error { + response, err := client.GET(ctx, cliConfigurationEndpoint) + if err != nil { + return err + } + + return copyJSONToWriter(response, writer) +} + +func copyToWriter(response *http.Response, writer io.Writer) error { + defer response.Body.Close() + + _, err := io.Copy(writer, response.Body) + if err != nil { + return fmt.Errorf("error writing response: %w", err) + } + + return nil +} + +func copyJSONToWriter(response *http.Response, writer io.Writer) error { + defer response.Body.Close() + + var data interface{} + + decoder := json.NewDecoder(response.Body) + + err := decoder.Decode(&data) + if err != nil { + return fmt.Errorf("diagnostic client error whilst reading response: %w", err) + } + + encoder := newFormattedEncoder(writer) + + err = encoder.Encode(data) + if err != nil { + return fmt.Errorf("diagnostic client error whilst writing json: %w", err) + } + + return nil +} + +type HTTPClient 
interface { + GetLogConfiguration(ctx context.Context) (*LogConfiguration, error) + GetMemoryDump(ctx context.Context, writer io.Writer) error + GetGoroutineDump(ctx context.Context, writer io.Writer) error + GetTunnelState(ctx context.Context) (*TunnelState, error) + GetSystemInformation(ctx context.Context, writer io.Writer) error + GetMetrics(ctx context.Context, writer io.Writer) error + GetCliConfiguration(ctx context.Context, writer io.Writer) error + GetTunnelConfiguration(ctx context.Context, writer io.Writer) error +} diff --git a/diagnostic/consts.go b/diagnostic/consts.go new file mode 100644 index 00000000..6a7e4449 --- /dev/null +++ b/diagnostic/consts.go @@ -0,0 +1,37 @@ +package diagnostic + +import "time" + +const ( + defaultCollectorTimeout = time.Second * 10 // This const define the timeout value of a collector operation. + collectorField = "collector" // used for logging purposes + systemCollectorName = "system" // used for logging purposes + tunnelStateCollectorName = "tunnelState" // used for logging purposes + configurationCollectorName = "configuration" // used for logging purposes + defaultTimeout = 15 * time.Second // timeout for the collectors + twoWeeksOffset = -14 * 24 * time.Hour // maximum offset for the logs + logFilename = "cloudflared_logs.txt" // name of the output log file + configurationKeyUID = "uid" // Key used to set and get the UID value from the configuration map + tailMaxNumberOfLines = "10000" // maximum number of log lines from a virtual runtime (docker or kubernetes) + + // Endpoints used by the diagnostic HTTP Client. 
+ cliConfigurationEndpoint = "/diag/configuration" + tunnelStateEndpoint = "/diag/tunnel" + systemInformationEndpoint = "/diag/system" + memoryDumpEndpoint = "debug/pprof/heap" + goroutineDumpEndpoint = "debug/pprof/goroutine" + metricsEndpoint = "metrics" + tunnelConfigurationEndpoint = "/config" + // Base for filenames of the diagnostic procedure + systemInformationBaseName = "systeminformation.json" + metricsBaseName = "metrics.txt" + zipName = "cloudflared-diag" + heapPprofBaseName = "heap.pprof" + goroutinePprofBaseName = "goroutine.pprof" + networkBaseName = "network.json" + rawNetworkBaseName = "raw-network.txt" + tunnelStateBaseName = "tunnelstate.json" + cliConfigurationBaseName = "cli-configuration.json" + configurationBaseName = "configuration.json" + taskResultBaseName = "task-result.json" +) diff --git a/diagnostic/diagnostic.go b/diagnostic/diagnostic.go new file mode 100644 index 00000000..0b0edbc8 --- /dev/null +++ b/diagnostic/diagnostic.go @@ -0,0 +1,561 @@ +package diagnostic + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "strings" + "sync" + "time" + + "github.com/rs/zerolog" + + network "github.com/cloudflare/cloudflared/diagnostic/network" +) + +const ( + taskSuccess = "success" + taskFailure = "failure" + jobReportName = "job report" + tunnelStateJobName = "tunnel state" + systemInformationJobName = "system information" + goroutineJobName = "goroutine profile" + heapJobName = "heap profile" + metricsJobName = "metrics" + logInformationJobName = "log information" + rawNetworkInformationJobName = "raw network information" + networkInformationJobName = "network information" + cliConfigurationJobName = "cli configuration" + configurationJobName = "configuration" +) + +// Struct used to hold the results of different routines executing the network collection. 
+type taskResult struct { + Result string `json:"result,omitempty"` + Err error `json:"error,omitempty"` + path string +} + +func (result taskResult) MarshalJSON() ([]byte, error) { + s := map[string]string{ + "result": result.Result, + } + if result.Err != nil { + s["error"] = result.Err.Error() + } + + return json.Marshal(s) +} + +// Struct used to hold the results of different routines executing the network collection. +type networkCollectionResult struct { + name string + info []*network.Hop + raw string + err error +} + +// This type represents the most common functions from the diagnostic http client +// functions. +type collectToWriterFunc func(ctx context.Context, writer io.Writer) error + +// This type represents the common denominator among all the collection procedures. +type collectFunc func(ctx context.Context) (string, error) + +// collectJob is an internal struct that denotes holds the information necessary +// to run a collection job. +type collectJob struct { + jobName string + fn collectFunc + bypass bool +} + +// The Toggles structure denotes the available toggles for the diagnostic procedure. +// Each toggle enables/disables tasks from the diagnostic. +type Toggles struct { + NoDiagLogs bool + NoDiagMetrics bool + NoDiagSystem bool + NoDiagRuntime bool + NoDiagNetwork bool +} + +// The Options structure holds every option necessary for +// the diagnostic procedure to work. 
+type Options struct { + KnownAddresses []string + Address string + ContainerID string + PodID string + Toggles Toggles +} + +func collectLogs( + ctx context.Context, + client HTTPClient, + diagContainer, diagPod string, +) (string, error) { + var collector LogCollector + if diagPod != "" { + collector = NewKubernetesLogCollector(diagContainer, diagPod) + } else if diagContainer != "" { + collector = NewDockerLogCollector(diagContainer) + } else { + collector = NewHostLogCollector(client) + } + + logInformation, err := collector.Collect(ctx) + if err != nil { + return "", fmt.Errorf("error collecting logs: %w", err) + } + + if logInformation.isDirectory { + return CopyFilesFromDirectory(logInformation.path) + } + + if logInformation.wasCreated { + return logInformation.path, nil + } + + logHandle, err := os.Open(logInformation.path) + if err != nil { + return "", fmt.Errorf("error opening log file while collecting logs: %w", err) + } + defer logHandle.Close() + + outputLogHandle, err := os.Create(filepath.Join(os.TempDir(), logFilename)) + if err != nil { + return "", ErrCreatingTemporaryFile + } + defer outputLogHandle.Close() + + _, err = io.Copy(outputLogHandle, logHandle) + if err != nil { + return "", fmt.Errorf("error copying logs while collecting logs: %w", err) + } + + return outputLogHandle.Name(), err +} + +func collectNetworkResultRoutine( + ctx context.Context, + collector network.NetworkCollector, + hostname string, + useIPv4 bool, + results chan networkCollectionResult, +) { + const ( + hopsNo = 5 + timeout = time.Second * 5 + ) + + name := hostname + + if useIPv4 { + name += "-v4" + } else { + name += "-v6" + } + + hops, raw, err := collector.Collect(ctx, network.NewTraceOptions(hopsNo, timeout, hostname, useIPv4)) + results <- networkCollectionResult{name, hops, raw, err} +} + +func gatherNetworkInformation(ctx context.Context) map[string]networkCollectionResult { + networkCollector := network.NetworkCollectorImpl{} + + hostAndIPversionPairs := 
[]struct { + host string + useV4 bool + }{ + {"region1.v2.argotunnel.com", true}, + {"region1.v2.argotunnel.com", false}, + {"region2.v2.argotunnel.com", true}, + {"region2.v2.argotunnel.com", false}, + } + + // the number of results is known thus use len to avoid footguns + results := make(chan networkCollectionResult, len(hostAndIPversionPairs)) + + var wgroup sync.WaitGroup + + for _, item := range hostAndIPversionPairs { + wgroup.Add(1) + + go func() { + defer wgroup.Done() + collectNetworkResultRoutine(ctx, &networkCollector, item.host, item.useV4, results) + }() + } + + // Wait for routines to end. + wgroup.Wait() + + resultMap := make(map[string]networkCollectionResult) + + for range len(hostAndIPversionPairs) { + result := <-results + resultMap[result.name] = result + } + + return resultMap +} + +func networkInformationCollectors() (rawNetworkCollector, jsonNetworkCollector collectFunc) { + // The network collector is an operation that takes most of the diagnostic time, thus, + // the sync.Once is used to memoize the result of the collector and then create different + // outputs. 
+ var once sync.Once + + var resultMap map[string]networkCollectionResult + + rawNetworkCollector = func(ctx context.Context) (string, error) { + once.Do(func() { resultMap = gatherNetworkInformation(ctx) }) + + return rawNetworkInformationWriter(resultMap) + } + jsonNetworkCollector = func(ctx context.Context) (string, error) { + once.Do(func() { resultMap = gatherNetworkInformation(ctx) }) + + return jsonNetworkInformationWriter(resultMap) + } + + return rawNetworkCollector, jsonNetworkCollector +} + +func rawNetworkInformationWriter(resultMap map[string]networkCollectionResult) (string, error) { + networkDumpHandle, err := os.Create(filepath.Join(os.TempDir(), rawNetworkBaseName)) + if err != nil { + return "", ErrCreatingTemporaryFile + } + + defer networkDumpHandle.Close() + + var exitErr error + + for k, v := range resultMap { + if v.err != nil { + if exitErr == nil { + exitErr = v.err + } + + _, err := networkDumpHandle.WriteString(k + "\nno content\n") + if err != nil { + return networkDumpHandle.Name(), fmt.Errorf("error writing 'no content' to raw network file: %w", err) + } + } else { + _, err := networkDumpHandle.WriteString(k + "\n" + v.raw + "\n") + if err != nil { + return networkDumpHandle.Name(), fmt.Errorf("error writing raw network information: %w", err) + } + } + } + + return networkDumpHandle.Name(), exitErr +} + +func jsonNetworkInformationWriter(resultMap map[string]networkCollectionResult) (string, error) { + networkDumpHandle, err := os.Create(filepath.Join(os.TempDir(), networkBaseName)) + if err != nil { + return "", ErrCreatingTemporaryFile + } + + defer networkDumpHandle.Close() + + encoder := newFormattedEncoder(networkDumpHandle) + + var exitErr error + + jsonMap := make(map[string][]*network.Hop, len(resultMap)) + for k, v := range resultMap { + jsonMap[k] = v.info + + if exitErr == nil && v.err != nil { + exitErr = v.err + } + } + + err = encoder.Encode(jsonMap) + if err != nil { + return networkDumpHandle.Name(), fmt.Errorf("error 
encoding network information results: %w", err) + } + + return networkDumpHandle.Name(), exitErr +} + +func collectFromEndpointAdapter(collect collectToWriterFunc, fileName string) collectFunc { + return func(ctx context.Context) (string, error) { + dumpHandle, err := os.Create(filepath.Join(os.TempDir(), fileName)) + if err != nil { + return "", ErrCreatingTemporaryFile + } + defer dumpHandle.Close() + + err = collect(ctx, dumpHandle) + if err != nil { + return dumpHandle.Name(), fmt.Errorf("error running collector: %w", err) + } + + return dumpHandle.Name(), nil + } +} + +func tunnelStateCollectEndpointAdapter(client HTTPClient, tunnel *TunnelState, fileName string) collectFunc { + endpointFunc := func(ctx context.Context, writer io.Writer) error { + if tunnel == nil { + // When the metrics server is not passed the diagnostic will query all known hosts + // and get the tunnel state, however, when the metrics server is passed that won't + // happen hence the check for nil in this function. + tunnelResponse, err := client.GetTunnelState(ctx) + if err != nil { + return fmt.Errorf("error retrieving tunnel state: %w", err) + } + + tunnel = tunnelResponse + } + + encoder := newFormattedEncoder(writer) + + err := encoder.Encode(tunnel) + if err != nil { + return fmt.Errorf("error encoding tunnel state: %w", err) + } + + return nil + } + + return collectFromEndpointAdapter(endpointFunc, fileName) +} + +// resolveInstanceBaseURL is responsible to +// resolve the base URL of the instance that should be diagnosed. +// To resolve the instance it may be necessary to query the +// /diag/tunnel endpoint of the known instances, thus, if a single +// instance is found its state is also returned; if multiple instances +// are found then their states are returned in an array along with an +// error. 
+func resolveInstanceBaseURL( + metricsServerAddress string, + log *zerolog.Logger, + client *httpClient, + addresses []string, +) (*url.URL, *TunnelState, []*AddressableTunnelState, error) { + if metricsServerAddress != "" { + if !strings.HasPrefix(metricsServerAddress, "http://") { + metricsServerAddress = "http://" + metricsServerAddress + } + url, err := url.Parse(metricsServerAddress) + if err != nil { + return nil, nil, nil, fmt.Errorf("provided address is not valid: %w", err) + } + + return url, nil, nil, nil + } + + tunnelState, foundTunnelStates, err := FindMetricsServer(log, client, addresses) + if err != nil { + return nil, nil, foundTunnelStates, err + } + + return tunnelState.URL, tunnelState.TunnelState, nil, nil +} + +func createJobs( + client *httpClient, + tunnel *TunnelState, + diagContainer string, + diagPod string, + noDiagSystem bool, + noDiagRuntime bool, + noDiagMetrics bool, + noDiagLogs bool, + noDiagNetwork bool, +) []collectJob { + rawNetworkCollectorFunc, jsonNetworkCollectorFunc := networkInformationCollectors() + jobs := []collectJob{ + { + jobName: tunnelStateJobName, + fn: tunnelStateCollectEndpointAdapter(client, tunnel, tunnelStateBaseName), + bypass: false, + }, + { + jobName: systemInformationJobName, + fn: collectFromEndpointAdapter(client.GetSystemInformation, systemInformationBaseName), + bypass: noDiagSystem, + }, + { + jobName: goroutineJobName, + fn: collectFromEndpointAdapter(client.GetGoroutineDump, goroutinePprofBaseName), + bypass: noDiagRuntime, + }, + { + jobName: heapJobName, + fn: collectFromEndpointAdapter(client.GetMemoryDump, heapPprofBaseName), + bypass: noDiagRuntime, + }, + { + jobName: metricsJobName, + fn: collectFromEndpointAdapter(client.GetMetrics, metricsBaseName), + bypass: noDiagMetrics, + }, + { + jobName: logInformationJobName, + fn: func(ctx context.Context) (string, error) { + return collectLogs(ctx, client, diagContainer, diagPod) + }, + bypass: noDiagLogs, + }, + { + jobName: 
rawNetworkInformationJobName, + fn: rawNetworkCollectorFunc, + bypass: noDiagNetwork, + }, + { + jobName: networkInformationJobName, + fn: jsonNetworkCollectorFunc, + bypass: noDiagNetwork, + }, + { + jobName: cliConfigurationJobName, + fn: collectFromEndpointAdapter(client.GetCliConfiguration, cliConfigurationBaseName), + bypass: false, + }, + { + jobName: configurationJobName, + fn: collectFromEndpointAdapter(client.GetTunnelConfiguration, configurationBaseName), + bypass: false, + }, + } + + return jobs +} + +func createTaskReport(taskReport map[string]taskResult) (string, error) { + dumpHandle, err := os.Create(filepath.Join(os.TempDir(), taskResultBaseName)) + if err != nil { + return "", ErrCreatingTemporaryFile + } + defer dumpHandle.Close() + + encoder := newFormattedEncoder(dumpHandle) + + err = encoder.Encode(taskReport) + if err != nil { + return "", fmt.Errorf("error encoding task results: %w", err) + } + + return dumpHandle.Name(), nil +} + +func runJobs(ctx context.Context, jobs []collectJob, log *zerolog.Logger) map[string]taskResult { + jobReport := make(map[string]taskResult, len(jobs)) + + for _, job := range jobs { + if job.bypass { + continue + } + + log.Info().Msgf("Collecting %s...", job.jobName) + path, err := job.fn(ctx) + + var result taskResult + if err != nil { + result = taskResult{Result: taskFailure, Err: err, path: path} + + log.Error().Err(err).Msgf("Job: %s finished with error.", job.jobName) + } else { + result = taskResult{Result: taskSuccess, Err: nil, path: path} + + log.Info().Msgf("Collected %s.", job.jobName) + } + + jobReport[job.jobName] = result + } + + taskReportName, err := createTaskReport(jobReport) + + var result taskResult + + if err != nil { + result = taskResult{ + Result: taskFailure, + path: taskReportName, + Err: err, + } + } else { + result = taskResult{ + Result: taskSuccess, + path: taskReportName, + Err: nil, + } + } + + jobReport[jobReportName] = result + + return jobReport +} + +func RunDiagnostic( + log 
*zerolog.Logger, + options Options, +) ([]*AddressableTunnelState, error) { + client := NewHTTPClient() + + baseURL, tunnel, foundTunnels, err := resolveInstanceBaseURL(options.Address, log, client, options.KnownAddresses) + if err != nil { + return foundTunnels, err + } + + log.Info().Msgf("Selected server %s starting diagnostic...", baseURL.String()) + client.SetBaseURL(baseURL) + + const timeout = 45 * time.Second + ctx, cancel := context.WithTimeout(context.Background(), timeout) + + defer cancel() + + jobs := createJobs( + client, + tunnel, + options.ContainerID, + options.PodID, + options.Toggles.NoDiagSystem, + options.Toggles.NoDiagRuntime, + options.Toggles.NoDiagMetrics, + options.Toggles.NoDiagLogs, + options.Toggles.NoDiagNetwork, + ) + + jobsReport := runJobs(ctx, jobs, log) + paths := make([]string, 0) + + var gerr error + + for _, v := range jobsReport { + paths = append(paths, v.path) + + if gerr == nil && v.Err != nil { + gerr = v.Err + } + + defer func() { + if !errors.Is(v.Err, ErrCreatingTemporaryFile) { + os.Remove(v.path) + } + }() + } + + zipfile, err := CreateDiagnosticZipFile(zipName, paths) + if err != nil { + return nil, err + } + + log.Info().Msgf("Diagnostic file written: %v", zipfile) + + return nil, gerr +} diff --git a/diagnostic/diagnostic_utils.go b/diagnostic/diagnostic_utils.go new file mode 100644 index 00000000..f760994b --- /dev/null +++ b/diagnostic/diagnostic_utils.go @@ -0,0 +1,148 @@ +package diagnostic + +import ( + "archive/zip" + "context" + "encoding/json" + "fmt" + "io" + "net/url" + "os" + "path/filepath" + "strings" + "time" + + "github.com/google/uuid" + "github.com/rs/zerolog" +) + +// CreateDiagnosticZipFile create a zip file with the contents from the all +// files paths. The files will be written in the root of the zip file. +// In case of an error occurs after whilst writing to the zip file +// this will be removed. 
+func CreateDiagnosticZipFile(base string, paths []string) (zipFileName string, err error) { + // Create a zip file with all files from paths added to the root + suffix := time.Now().Format(time.RFC3339) + zipFileName = base + "-" + suffix + ".zip" + zipFileName = strings.ReplaceAll(zipFileName, ":", "-") + + archive, cerr := os.Create(zipFileName) + if cerr != nil { + return "", fmt.Errorf("error creating file %s: %w", zipFileName, cerr) + } + + archiveWriter := zip.NewWriter(archive) + + defer func() { + archiveWriter.Close() + archive.Close() + + if err != nil { + os.Remove(zipFileName) + } + }() + + for _, file := range paths { + if file == "" { + continue + } + + var handle *os.File + + handle, err = os.Open(file) + if err != nil { + return "", fmt.Errorf("error opening file %s: %w", zipFileName, err) + } + + defer handle.Close() + + // Keep the base only to not create sub directories in the + // zip file. + var writer io.Writer + + writer, err = archiveWriter.Create(filepath.Base(file)) + if err != nil { + return "", fmt.Errorf("error creating archive writer from %s: %w", file, err) + } + + if _, err = io.Copy(writer, handle); err != nil { + return "", fmt.Errorf("error copying file %s: %w", file, err) + } + } + + zipFileName = archive.Name() + return zipFileName, nil +} + +type AddressableTunnelState struct { + *TunnelState + URL *url.URL +} + +func findMetricsServerPredicate(tunnelID, connectorID uuid.UUID) func(state *TunnelState) bool { + if tunnelID != uuid.Nil && connectorID != uuid.Nil { + return func(state *TunnelState) bool { + return state.ConnectorID == connectorID && state.TunnelID == tunnelID + } + } else if tunnelID == uuid.Nil && connectorID != uuid.Nil { + return func(state *TunnelState) bool { + return state.ConnectorID == connectorID + } + } else if tunnelID != uuid.Nil && connectorID == uuid.Nil { + return func(state *TunnelState) bool { + return state.TunnelID == tunnelID + } + } + + return func(*TunnelState) bool { + return true + } +} + 
+// The FindMetricsServer will try to find the metrics server url. +// There are two possible error scenarios: +// 1. No instance is found which will only return ErrMetricsServerNotFound +// 2. Multiple instances are found which will return an array of state and ErrMultipleMetricsServerFound +// In case of success, only the state for the instance is returned. +func FindMetricsServer( + log *zerolog.Logger, + client *httpClient, + addresses []string, +) (*AddressableTunnelState, []*AddressableTunnelState, error) { + instances := make([]*AddressableTunnelState, 0) + + for _, address := range addresses { + url, err := url.Parse("http://" + address) + if err != nil { + log.Debug().Err(err).Msgf("error parsing address %s", address) + + continue + } + + client.SetBaseURL(url) + + state, err := client.GetTunnelState(context.Background()) + if err == nil { + instances = append(instances, &AddressableTunnelState{state, url}) + } else { + log.Debug().Err(err).Msgf("error getting tunnel state from address %s", address) + } + } + + if len(instances) == 0 { + return nil, nil, ErrMetricsServerNotFound + } + + if len(instances) == 1 { + return instances[0], nil, nil + } + + return nil, instances, ErrMultipleMetricsServerFound +} + +// newFormattedEncoder returns a JSON encoder with indentation. +func newFormattedEncoder(w io.Writer) *json.Encoder { + encoder := json.NewEncoder(w) + encoder.SetIndent("", " ") + return encoder +} diff --git a/diagnostic/diagnostic_utils_test.go b/diagnostic/diagnostic_utils_test.go new file mode 100644 index 00000000..f0f5a6a3 --- /dev/null +++ b/diagnostic/diagnostic_utils_test.go @@ -0,0 +1,147 @@ +package diagnostic_test + +import ( + "context" + "net/http" + "net/url" + "sync" + "testing" + "time" + + "github.com/facebookgo/grace/gracenet" + "github.com/google/uuid" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/cloudflare/cloudflared/diagnostic" + 
"github.com/cloudflare/cloudflared/metrics" + "github.com/cloudflare/cloudflared/tunnelstate" +) + +func helperCreateServer(t *testing.T, listeners *gracenet.Net, tunnelID uuid.UUID, connectorID uuid.UUID) func() { + t.Helper() + listener, err := metrics.CreateMetricsListener(listeners, "localhost:0") + require.NoError(t, err) + log := zerolog.Nop() + tracker := tunnelstate.NewConnTracker(&log) + handler := diagnostic.NewDiagnosticHandler(&log, 0, nil, tunnelID, connectorID, tracker, map[string]string{}, []string{}) + router := http.NewServeMux() + router.HandleFunc("/diag/tunnel", handler.TunnelStateHandler) + server := &http.Server{ + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + Handler: router, + } + + var wgroup sync.WaitGroup + + wgroup.Add(1) + + go func() { + defer wgroup.Done() + + _ = server.Serve(listener) + }() + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + + cleanUp := func() { + _ = server.Shutdown(ctx) + + cancel() + wgroup.Wait() + } + + return cleanUp +} + +func TestFindMetricsServer_WhenSingleServerIsRunning_ReturnState(t *testing.T) { + listeners := gracenet.Net{} + tid1 := uuid.New() + cid1 := uuid.New() + + cleanUp := helperCreateServer(t, &listeners, tid1, cid1) + defer cleanUp() + + log := zerolog.Nop() + client := diagnostic.NewHTTPClient() + addresses := metrics.GetMetricsKnownAddresses("host") + url1, err := url.Parse("http://localhost:20241") + require.NoError(t, err) + + tunnel1 := &diagnostic.AddressableTunnelState{ + TunnelState: &diagnostic.TunnelState{ + TunnelID: tid1, + ConnectorID: cid1, + Connections: nil, + }, + URL: url1, + } + + state, tunnels, err := diagnostic.FindMetricsServer(&log, client, addresses[:]) + if err != nil { + require.ErrorIs(t, err, diagnostic.ErrMultipleMetricsServerFound) + } + + assert.Equal(t, tunnel1, state) + assert.Nil(t, tunnels) +} + +func TestFindMetricsServer_WhenMultipleServerAreRunning_ReturnError(t *testing.T) { + listeners := gracenet.Net{} + 
tid1 := uuid.New() + cid1 := uuid.New() + cid2 := uuid.New() + + cleanUp := helperCreateServer(t, &listeners, tid1, cid1) + defer cleanUp() + + cleanUp = helperCreateServer(t, &listeners, tid1, cid2) + defer cleanUp() + + log := zerolog.Nop() + client := diagnostic.NewHTTPClient() + addresses := metrics.GetMetricsKnownAddresses("host") + url1, err := url.Parse("http://localhost:20241") + require.NoError(t, err) + url2, err := url.Parse("http://localhost:20242") + require.NoError(t, err) + + tunnel1 := &diagnostic.AddressableTunnelState{ + TunnelState: &diagnostic.TunnelState{ + TunnelID: tid1, + ConnectorID: cid1, + Connections: nil, + }, + URL: url1, + } + tunnel2 := &diagnostic.AddressableTunnelState{ + TunnelState: &diagnostic.TunnelState{ + TunnelID: tid1, + ConnectorID: cid2, + Connections: nil, + }, + URL: url2, + } + + state, tunnels, err := diagnostic.FindMetricsServer(&log, client, addresses[:]) + if err != nil { + require.ErrorIs(t, err, diagnostic.ErrMultipleMetricsServerFound) + } + + assert.Nil(t, state) + assert.Equal(t, []*diagnostic.AddressableTunnelState{tunnel1, tunnel2}, tunnels) +} + +func TestFindMetricsServer_WhenNoInstanceIsRuning_ReturnError(t *testing.T) { + log := zerolog.Nop() + client := diagnostic.NewHTTPClient() + addresses := metrics.GetMetricsKnownAddresses("host") + + state, tunnels, err := diagnostic.FindMetricsServer(&log, client, addresses[:]) + require.ErrorIs(t, err, diagnostic.ErrMetricsServerNotFound) + + assert.Nil(t, state) + assert.Nil(t, tunnels) +} diff --git a/diagnostic/error.go b/diagnostic/error.go new file mode 100644 index 00000000..3a77a172 --- /dev/null +++ b/diagnostic/error.go @@ -0,0 +1,28 @@ +package diagnostic + +import ( + "errors" +) + +var ( + // Error used when there is no log directory available. + ErrManagedLogNotFound = errors.New("managed log directory not found") + // Error used when it is not possible to collect logs using the log configuration. 
 + ErrLogConfigurationIsInvalid = errors.New("provided log configuration is invalid") + // Error used when parsing the fields of the output of collector. + ErrInsufficientLines = errors.New("insufficient lines") + // Error used when parsing the lines of the output of collector. + ErrInsuficientFields = errors.New("insufficient fields") + // Error used when given key is not found while parsing KV. + ErrKeyNotFound = errors.New("key not found") + // Error used when there is no disk volume information available. + ErrNoVolumeFound = errors.New("no disk volume information found") + // Error used when the base url of the diagnostic client is not provided. + ErrNoBaseURL = errors.New("no base url") + // Error used when no metrics server is found listening to the known addresses list (check [metrics.GetMetricsKnownAddresses]). + ErrMetricsServerNotFound = errors.New("metrics server not found") + // Error used when multiple metrics servers are found listening to the known addresses list (check [metrics.GetMetricsKnownAddresses]). 
+ ErrMultipleMetricsServerFound = errors.New("multiple metrics server found") + // Error used when a temporary file creation fails within the diagnostic procedure + ErrCreatingTemporaryFile = errors.New("temporary file creation failed") +) diff --git a/diagnostic/handlers.go b/diagnostic/handlers.go new file mode 100644 index 00000000..e4d85db4 --- /dev/null +++ b/diagnostic/handlers.go @@ -0,0 +1,144 @@ +package diagnostic + +import ( + "context" + "encoding/json" + "net/http" + "os" + "strconv" + "time" + + "github.com/google/uuid" + "github.com/rs/zerolog" + + "github.com/cloudflare/cloudflared/tunnelstate" +) + +type Handler struct { + log *zerolog.Logger + timeout time.Duration + systemCollector SystemCollector + tunnelID uuid.UUID + connectorID uuid.UUID + tracker *tunnelstate.ConnTracker + cliFlags map[string]string + icmpSources []string +} + +func NewDiagnosticHandler( + log *zerolog.Logger, + timeout time.Duration, + systemCollector SystemCollector, + tunnelID uuid.UUID, + connectorID uuid.UUID, + tracker *tunnelstate.ConnTracker, + cliFlags map[string]string, + icmpSources []string, +) *Handler { + logger := log.With().Logger() + if timeout == 0 { + timeout = defaultCollectorTimeout + } + + cliFlags[configurationKeyUID] = strconv.Itoa(os.Getuid()) + return &Handler{ + log: &logger, + timeout: timeout, + systemCollector: systemCollector, + tunnelID: tunnelID, + connectorID: connectorID, + tracker: tracker, + cliFlags: cliFlags, + icmpSources: icmpSources, + } +} + +func (handler *Handler) InstallEndpoints(router *http.ServeMux) { + router.HandleFunc(cliConfigurationEndpoint, handler.ConfigurationHandler) + router.HandleFunc(tunnelStateEndpoint, handler.TunnelStateHandler) + router.HandleFunc(systemInformationEndpoint, handler.SystemHandler) +} + +type SystemInformationResponse struct { + Info *SystemInformation `json:"info"` + Err error `json:"errors"` +} + +func (handler *Handler) SystemHandler(writer http.ResponseWriter, request *http.Request) { + 
logger := handler.log.With().Str(collectorField, systemCollectorName).Logger() + logger.Info().Msg("Collection started") + + defer logger.Info().Msg("Collection finished") + + ctx, cancel := context.WithTimeout(request.Context(), handler.timeout) + + defer cancel() + + info, err := handler.systemCollector.Collect(ctx) + + response := SystemInformationResponse{ + Info: info, + Err: err, + } + + encoder := json.NewEncoder(writer) + err = encoder.Encode(response) + if err != nil { + logger.Error().Err(err).Msgf("error occurred whilst serializing information") + writer.WriteHeader(http.StatusInternalServerError) + } +} + +type TunnelState struct { + TunnelID uuid.UUID `json:"tunnelID,omitempty"` + ConnectorID uuid.UUID `json:"connectorID,omitempty"` + Connections []tunnelstate.IndexedConnectionInfo `json:"connections,omitempty"` + ICMPSources []string `json:"icmp_sources,omitempty"` +} + +func (handler *Handler) TunnelStateHandler(writer http.ResponseWriter, _ *http.Request) { + log := handler.log.With().Str(collectorField, tunnelStateCollectorName).Logger() + log.Info().Msg("Collection started") + + defer log.Info().Msg("Collection finished") + + body := TunnelState{ + handler.tunnelID, + handler.connectorID, + handler.tracker.GetActiveConnections(), + handler.icmpSources, + } + encoder := json.NewEncoder(writer) + + err := encoder.Encode(body) + if err != nil { + handler.log.Error().Err(err).Msgf("error occurred whilst serializing information") + writer.WriteHeader(http.StatusInternalServerError) + } +} + +func (handler *Handler) ConfigurationHandler(writer http.ResponseWriter, _ *http.Request) { + log := handler.log.With().Str(collectorField, configurationCollectorName).Logger() + log.Info().Msg("Collection started") + + defer func() { + log.Info().Msg("Collection finished") + }() + + encoder := json.NewEncoder(writer) + + err := encoder.Encode(handler.cliFlags) + if err != nil { + handler.log.Error().Err(err).Msgf("error occurred whilst serializing response") + 
writer.WriteHeader(http.StatusInternalServerError) + } +} + +func writeResponse(w http.ResponseWriter, bytes []byte, logger *zerolog.Logger) { + bytesWritten, err := w.Write(bytes) + if err != nil { + logger.Error().Err(err).Msg("error occurred writing response") + } else if bytesWritten != len(bytes) { + logger.Error().Msgf("error incomplete write response %d/%d", bytesWritten, len(bytes)) + } +} diff --git a/diagnostic/handlers_test.go b/diagnostic/handlers_test.go new file mode 100644 index 00000000..2849241c --- /dev/null +++ b/diagnostic/handlers_test.go @@ -0,0 +1,224 @@ +package diagnostic_test + +import ( + "context" + "encoding/json" + "errors" + "net" + "net/http" + "net/http/httptest" + "runtime" + "testing" + + "github.com/google/uuid" + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/cloudflare/cloudflared/connection" + "github.com/cloudflare/cloudflared/diagnostic" + "github.com/cloudflare/cloudflared/tunnelstate" +) + +type SystemCollectorMock struct { + systemInfo *diagnostic.SystemInformation + err error +} + +const ( + systemInformationKey = "sikey" + errorKey = "errkey" +) + +func newTrackerFromConns(t *testing.T, connections []tunnelstate.IndexedConnectionInfo) *tunnelstate.ConnTracker { + t.Helper() + + log := zerolog.Nop() + tracker := tunnelstate.NewConnTracker(&log) + + for _, conn := range connections { + tracker.OnTunnelEvent(connection.Event{ + Index: conn.Index, + EventType: connection.Connected, + Protocol: conn.Protocol, + EdgeAddress: conn.EdgeAddress, + }) + } + + return tracker +} + +func (collector *SystemCollectorMock) Collect(context.Context) (*diagnostic.SystemInformation, error) { + return collector.systemInfo, collector.err +} + +func TestSystemHandler(t *testing.T) { + t.Parallel() + + log := zerolog.Nop() + tests := []struct { + name string + systemInfo *diagnostic.SystemInformation + err error + statusCode int + }{ + { + name: "happy path", + systemInfo: 
diagnostic.NewSystemInformation( + 0, 0, 0, 0, + "string", "string", "string", "string", + "string", "string", + runtime.Version(), runtime.GOARCH, nil, + ), + + err: nil, + statusCode: http.StatusOK, + }, + { + name: "on error and no raw info", systemInfo: nil, + err: errors.New("an error"), statusCode: http.StatusOK, + }, + } + + for _, tCase := range tests { + t.Run(tCase.name, func(t *testing.T) { + t.Parallel() + handler := diagnostic.NewDiagnosticHandler(&log, 0, &SystemCollectorMock{ + systemInfo: tCase.systemInfo, + err: tCase.err, + }, uuid.New(), uuid.New(), nil, map[string]string{}, nil) + recorder := httptest.NewRecorder() + ctx := context.Background() + request, err := http.NewRequestWithContext(ctx, http.MethodGet, "/diag/system", nil) + require.NoError(t, err) + handler.SystemHandler(recorder, request) + + assert.Equal(t, tCase.statusCode, recorder.Code) + if tCase.statusCode == http.StatusOK && tCase.systemInfo != nil { + var response diagnostic.SystemInformationResponse + decoder := json.NewDecoder(recorder.Body) + err := decoder.Decode(&response) + require.NoError(t, err) + assert.Equal(t, tCase.systemInfo, response.Info) + } + }) + } +} + +func TestTunnelStateHandler(t *testing.T) { + t.Parallel() + + log := zerolog.Nop() + tests := []struct { + name string + tunnelID uuid.UUID + clientID uuid.UUID + connections []tunnelstate.IndexedConnectionInfo + icmpSources []string + }{ + { + name: "case1", + tunnelID: uuid.New(), + clientID: uuid.New(), + }, + { + name: "case2", + tunnelID: uuid.New(), + clientID: uuid.New(), + icmpSources: []string{"172.17.0.3", "::1"}, + connections: []tunnelstate.IndexedConnectionInfo{{ + ConnectionInfo: tunnelstate.ConnectionInfo{ + IsConnected: true, + Protocol: connection.QUIC, + EdgeAddress: net.IPv4(100, 100, 100, 100), + }, + Index: 0, + }}, + }, + } + + for _, tCase := range tests { + t.Run(tCase.name, func(t *testing.T) { + t.Parallel() + tracker := newTrackerFromConns(t, tCase.connections) + handler := 
diagnostic.NewDiagnosticHandler( + &log, + 0, + nil, + tCase.tunnelID, + tCase.clientID, + tracker, + map[string]string{}, + tCase.icmpSources, + ) + recorder := httptest.NewRecorder() + handler.TunnelStateHandler(recorder, nil) + decoder := json.NewDecoder(recorder.Body) + + var response diagnostic.TunnelState + err := decoder.Decode(&response) + require.NoError(t, err) + assert.Equal(t, http.StatusOK, recorder.Code) + assert.Equal(t, tCase.tunnelID, response.TunnelID) + assert.Equal(t, tCase.clientID, response.ConnectorID) + assert.Equal(t, tCase.connections, response.Connections) + assert.Equal(t, tCase.icmpSources, response.ICMPSources) + }) + } +} + +func TestConfigurationHandler(t *testing.T) { + t.Parallel() + + log := zerolog.Nop() + + tests := []struct { + name string + flags map[string]string + expected map[string]string + }{ + { + name: "empty cli", + flags: make(map[string]string), + expected: map[string]string{ + "uid": "0", + }, + }, + { + name: "cli with flags", + flags: map[string]string{ + "b": "a", + "c": "a", + "d": "a", + "uid": "0", + }, + expected: map[string]string{ + "b": "a", + "c": "a", + "d": "a", + "uid": "0", + }, + }, + } + + for _, tCase := range tests { + t.Run(tCase.name, func(t *testing.T) { + t.Parallel() + + var response map[string]string + + handler := diagnostic.NewDiagnosticHandler(&log, 0, nil, uuid.New(), uuid.New(), nil, tCase.flags, nil) + recorder := httptest.NewRecorder() + handler.ConfigurationHandler(recorder, nil) + decoder := json.NewDecoder(recorder.Body) + err := decoder.Decode(&response) + require.NoError(t, err) + _, ok := response["uid"] + assert.True(t, ok) + delete(tCase.expected, "uid") + delete(response, "uid") + assert.Equal(t, http.StatusOK, recorder.Code) + assert.Equal(t, tCase.expected, response) + }) + } +} diff --git a/diagnostic/log_collector.go b/diagnostic/log_collector.go new file mode 100644 index 00000000..cdf559e7 --- /dev/null +++ b/diagnostic/log_collector.go @@ -0,0 +1,34 @@ +package 
diagnostic + +import ( + "context" +) + +// Represents the path of the log file or log directory. +// This struct is meant to give some ergonimics regarding +// the logging information. +type LogInformation struct { + path string // path to a file or directory + wasCreated bool // denotes if `path` was created + isDirectory bool // denotes if `path` is a directory +} + +func NewLogInformation( + path string, + wasCreated bool, + isDirectory bool, +) *LogInformation { + return &LogInformation{ + path, + wasCreated, + isDirectory, + } +} + +type LogCollector interface { + // This function is responsible for returning a path to a single file + // whose contents are the logs of a cloudflared instance. + // A new file may be create by a LogCollector, thus, its the caller + // responsibility to remove the newly create file. + Collect(ctx context.Context) (*LogInformation, error) +} diff --git a/diagnostic/log_collector_docker.go b/diagnostic/log_collector_docker.go new file mode 100644 index 00000000..f87a9534 --- /dev/null +++ b/diagnostic/log_collector_docker.go @@ -0,0 +1,47 @@ +package diagnostic + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "time" +) + +type DockerLogCollector struct { + containerID string // This member identifies the container by identifier or name +} + +func NewDockerLogCollector(containerID string) *DockerLogCollector { + return &DockerLogCollector{ + containerID, + } +} + +func (collector *DockerLogCollector) Collect(ctx context.Context) (*LogInformation, error) { + tmp := os.TempDir() + + outputHandle, err := os.Create(filepath.Join(tmp, logFilename)) + if err != nil { + return nil, fmt.Errorf("error opening output file: %w", err) + } + + defer outputHandle.Close() + + // Calculate 2 weeks ago + since := time.Now().Add(twoWeeksOffset).Format(time.RFC3339) + + command := exec.CommandContext( + ctx, + "docker", + "logs", + "--tail", + tailMaxNumberOfLines, + "--since", + since, + collector.containerID, + ) + + return 
PipeCommandOutputToFile(command, outputHandle) +} diff --git a/diagnostic/log_collector_host.go b/diagnostic/log_collector_host.go new file mode 100644 index 00000000..5218e975 --- /dev/null +++ b/diagnostic/log_collector_host.go @@ -0,0 +1,105 @@ +package diagnostic + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "runtime" +) + +const ( + linuxManagedLogsPath = "/var/log/cloudflared.err" + darwinManagedLogsPath = "/Library/Logs/com.cloudflare.cloudflared.err.log" + linuxServiceConfigurationPath = "/etc/systemd/system/cloudflared.service" + linuxSystemdPath = "/run/systemd/system" +) + +type HostLogCollector struct { + client HTTPClient +} + +func NewHostLogCollector(client HTTPClient) *HostLogCollector { + return &HostLogCollector{ + client, + } +} + +func extractLogsFromJournalCtl(ctx context.Context) (*LogInformation, error) { + tmp := os.TempDir() + + outputHandle, err := os.Create(filepath.Join(tmp, logFilename)) + if err != nil { + return nil, fmt.Errorf("error opening output file: %w", err) + } + + defer outputHandle.Close() + + command := exec.CommandContext( + ctx, + "journalctl", + "--since", + "2 weeks ago", + "-u", + "cloudflared.service", + ) + + return PipeCommandOutputToFile(command, outputHandle) +} + +func getServiceLogPath() (string, error) { + switch runtime.GOOS { + case "darwin": + { + path := darwinManagedLogsPath + if _, err := os.Stat(path); err == nil { + return path, nil + } + + userHomeDir, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("error getting user home: %w", err) + } + + return filepath.Join(userHomeDir, darwinManagedLogsPath), nil + } + case "linux": + { + return linuxManagedLogsPath, nil + } + default: + return "", ErrManagedLogNotFound + } +} + +func (collector *HostLogCollector) Collect(ctx context.Context) (*LogInformation, error) { + logConfiguration, err := collector.client.GetLogConfiguration(ctx) + if err != nil { + return nil, fmt.Errorf("error getting log configuration: %w", 
err) + } + + if logConfiguration.uid == 0 { + _, statSystemdErr := os.Stat(linuxServiceConfigurationPath) + + _, statServiceConfigurationErr := os.Stat(linuxServiceConfigurationPath) + if statSystemdErr == nil && statServiceConfigurationErr == nil && runtime.GOOS == "linux" { + return extractLogsFromJournalCtl(ctx) + } + + path, err := getServiceLogPath() + if err != nil { + return nil, err + } + + return NewLogInformation(path, false, false), nil + } + + if logConfiguration.logFile != "" { + return NewLogInformation(logConfiguration.logFile, false, false), nil + } else if logConfiguration.logDirectory != "" { + return NewLogInformation(logConfiguration.logDirectory, false, true), nil + } + + return nil, ErrLogConfigurationIsInvalid +} diff --git a/diagnostic/log_collector_kubernetes.go b/diagnostic/log_collector_kubernetes.go new file mode 100644 index 00000000..ce45030e --- /dev/null +++ b/diagnostic/log_collector_kubernetes.go @@ -0,0 +1,63 @@ +package diagnostic + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "time" +) + +type KubernetesLogCollector struct { + containerID string // This member identifies the container by identifier or name + pod string // This member identifies the pod where the container is deployed +} + +func NewKubernetesLogCollector(containerID, pod string) *KubernetesLogCollector { + return &KubernetesLogCollector{ + containerID, + pod, + } +} + +func (collector *KubernetesLogCollector) Collect(ctx context.Context) (*LogInformation, error) { + tmp := os.TempDir() + outputHandle, err := os.Create(filepath.Join(tmp, logFilename)) + if err != nil { + return nil, fmt.Errorf("error opening output file: %w", err) + } + + defer outputHandle.Close() + + var command *exec.Cmd + // Calculate 2 weeks ago + since := time.Now().Add(twoWeeksOffset).Format(time.RFC3339) + if collector.containerID != "" { + command = exec.CommandContext( + ctx, + "kubectl", + "logs", + collector.pod, + "--since-time", + since, + "--tail", + 
tailMaxNumberOfLines, + "-c", + collector.containerID, + ) + } else { + command = exec.CommandContext( + ctx, + "kubectl", + "logs", + collector.pod, + "--since-time", + since, + "--tail", + tailMaxNumberOfLines, + ) + } + + return PipeCommandOutputToFile(command, outputHandle) +} diff --git a/diagnostic/log_collector_utils.go b/diagnostic/log_collector_utils.go new file mode 100644 index 00000000..728b5298 --- /dev/null +++ b/diagnostic/log_collector_utils.go @@ -0,0 +1,109 @@ +package diagnostic + +import ( + "fmt" + "io" + "os" + "os/exec" + "path/filepath" +) + +func PipeCommandOutputToFile(command *exec.Cmd, outputHandle *os.File) (*LogInformation, error) { + stdoutReader, err := command.StdoutPipe() + if err != nil { + return nil, fmt.Errorf( + "error retrieving stdout from command '%s': %w", + command.String(), + err, + ) + } + + stderrReader, err := command.StderrPipe() + if err != nil { + return nil, fmt.Errorf( + "error retrieving stderr from command '%s': %w", + command.String(), + err, + ) + } + + if err := command.Start(); err != nil { + return nil, fmt.Errorf( + "error running command '%s': %w", + command.String(), + err, + ) + } + + _, err = io.Copy(outputHandle, stdoutReader) + if err != nil { + return nil, fmt.Errorf( + "error copying stdout from %s to file %s: %w", + command.String(), + outputHandle.Name(), + err, + ) + } + + _, err = io.Copy(outputHandle, stderrReader) + if err != nil { + return nil, fmt.Errorf( + "error copying stderr from %s to file %s: %w", + command.String(), + outputHandle.Name(), + err, + ) + } + + if err := command.Wait(); err != nil { + return nil, fmt.Errorf( + "error waiting from command '%s': %w", + command.String(), + err, + ) + } + + return NewLogInformation(outputHandle.Name(), true, false), nil +} + +func CopyFilesFromDirectory(path string) (string, error) { + // rolling logs have as suffix the current date thus + // when iterating the path files they are already in + // chronological order + files, err := 
os.ReadDir(path) + if err != nil { + return "", fmt.Errorf("error reading directory %s: %w", path, err) + } + + outputHandle, err := os.Create(filepath.Join(os.TempDir(), logFilename)) + if err != nil { + return "", fmt.Errorf("creating file %s: %w", outputHandle.Name(), err) + } + defer outputHandle.Close() + + for _, file := range files { + logHandle, err := os.Open(filepath.Join(path, file.Name())) + if err != nil { + return "", fmt.Errorf("error opening file %s:%w", file.Name(), err) + } + defer logHandle.Close() + + _, err = io.Copy(outputHandle, logHandle) + if err != nil { + return "", fmt.Errorf("error copying file %s:%w", logHandle.Name(), err) + } + } + + logHandle, err := os.Open(filepath.Join(path, "cloudflared.log")) + if err != nil { + return "", fmt.Errorf("error opening file %s:%w", logHandle.Name(), err) + } + defer logHandle.Close() + + _, err = io.Copy(outputHandle, logHandle) + if err != nil { + return "", fmt.Errorf("error copying file %s:%w", logHandle.Name(), err) + } + + return outputHandle.Name(), nil +} diff --git a/diagnostic/network/collector.go b/diagnostic/network/collector.go new file mode 100644 index 00000000..8a3a0fd9 --- /dev/null +++ b/diagnostic/network/collector.go @@ -0,0 +1,77 @@ +package diagnostic + +import ( + "context" + "errors" + "time" +) + +const MicrosecondsFactor = 1000.0 + +var ErrEmptyDomain = errors.New("domain must not be empty") + +// For now only support ICMP is provided. 
+type IPVersion int + +const ( + V4 IPVersion = iota + V6 IPVersion = iota +) + +type Hop struct { + Hop uint8 `json:"hop,omitempty"` // hop number along the route + Domain string `json:"domain,omitempty"` // domain and/or ip of the hop, this field will be '*' if the hop is a timeout + Rtts []time.Duration `json:"rtts,omitempty"` // RTT measurements in microseconds +} + +type TraceOptions struct { + ttl uint64 // number of hops to perform + timeout time.Duration // wait timeout for each response + address string // address to trace + useV4 bool +} + +func NewTimeoutHop( + hop uint8, +) *Hop { + // Whenever there is a hop in the format of 'N * * *' + // it means that the hop in the path didn't answer to + // any probe. + return NewHop( + hop, + "*", + nil, + ) +} + +func NewHop(hop uint8, domain string, rtts []time.Duration) *Hop { + return &Hop{ + hop, + domain, + rtts, + } +} + +func NewTraceOptions( + ttl uint64, + timeout time.Duration, + address string, + useV4 bool, +) TraceOptions { + return TraceOptions{ + ttl, + timeout, + address, + useV4, + } +} + +type NetworkCollector interface { + // Performs a trace route operation with the specified options. + // In case the trace fails, it will return a non-nil error and + // it may return a string which represents the raw information + // obtained. + // In case it is successful it will only return an array of Hops + // an empty string and a nil error. 
+ Collect(ctx context.Context, options TraceOptions) ([]*Hop, string, error) +} diff --git a/diagnostic/network/collector_unix.go b/diagnostic/network/collector_unix.go new file mode 100644 index 00000000..2db2d262 --- /dev/null +++ b/diagnostic/network/collector_unix.go @@ -0,0 +1,78 @@ +//go:build darwin || linux + +package diagnostic + +import ( + "context" + "fmt" + "os/exec" + "strconv" + "strings" + "time" +) + +type NetworkCollectorImpl struct{} + +func (tracer *NetworkCollectorImpl) Collect(ctx context.Context, options TraceOptions) ([]*Hop, string, error) { + args := []string{ + "-I", + "-w", + strconv.FormatInt(int64(options.timeout.Seconds()), 10), + "-m", + strconv.FormatUint(options.ttl, 10), + options.address, + } + + var command string + + switch options.useV4 { + case false: + command = "traceroute6" + default: + command = "traceroute" + } + + process := exec.CommandContext(ctx, command, args...) + + return decodeNetworkOutputToFile(process, DecodeLine) +} + +func DecodeLine(text string) (*Hop, error) { + fields := strings.Fields(text) + parts := []string{} + filter := func(s string) bool { return s != "*" && s != "ms" } + + for _, field := range fields { + if filter(field) { + parts = append(parts, field) + } + } + + index, err := strconv.ParseUint(parts[0], 10, 8) + if err != nil { + return nil, fmt.Errorf("couldn't parse index from timeout hop: %w", err) + } + + if len(parts) == 1 { + return NewTimeoutHop(uint8(index)), nil + } + + domain := "" + rtts := []time.Duration{} + + for _, part := range parts[1:] { + rtt, err := strconv.ParseFloat(part, 64) + if err != nil { + domain += part + " " + } else { + rtts = append(rtts, time.Duration(rtt*MicrosecondsFactor)) + } + } + + domain, _ = strings.CutSuffix(domain, " ") + if domain == "" { + return nil, ErrEmptyDomain + } + + return NewHop(uint8(index), domain, rtts), nil +} diff --git a/diagnostic/network/collector_unix_test.go b/diagnostic/network/collector_unix_test.go new file mode 100644 index 
00000000..5ec231a3 --- /dev/null +++ b/diagnostic/network/collector_unix_test.go @@ -0,0 +1,173 @@ +//go:build darwin || linux + +package diagnostic_test + +import ( + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + diagnostic "github.com/cloudflare/cloudflared/diagnostic/network" +) + +func TestDecode(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + text string + expectedHops []*diagnostic.Hop + }{ + { + "repeated hop index parse failure", + `1 172.68.101.121 (172.68.101.121) 12.874 ms 15.517 ms 15.311 ms +2 172.68.101.121 (172.68.101.121) 12.874 ms 15.517 ms 15.311 ms +someletters * * * +4 172.68.101.121 (172.68.101.121) 12.874 ms 15.517 ms 15.311 ms `, + []*diagnostic.Hop{ + diagnostic.NewHop( + uint8(1), + "172.68.101.121 (172.68.101.121)", + []time.Duration{ + time.Duration(12874), + time.Duration(15517), + time.Duration(15311), + }, + ), + diagnostic.NewHop( + uint8(2), + "172.68.101.121 (172.68.101.121)", + []time.Duration{ + time.Duration(12874), + time.Duration(15517), + time.Duration(15311), + }, + ), + diagnostic.NewHop( + uint8(4), + "172.68.101.121 (172.68.101.121)", + []time.Duration{ + time.Duration(12874), + time.Duration(15517), + time.Duration(15311), + }, + ), + }, + }, + { + "hop index parse failure", + `1 172.68.101.121 (172.68.101.121) 12.874 ms 15.517 ms 15.311 ms +2 172.68.101.121 (172.68.101.121) 12.874 ms 15.517 ms 15.311 ms +someletters 8.8.8.8 8.8.8.9 abc ms 0.456 ms 0.789 ms`, + []*diagnostic.Hop{ + diagnostic.NewHop( + uint8(1), + "172.68.101.121 (172.68.101.121)", + []time.Duration{ + time.Duration(12874), + time.Duration(15517), + time.Duration(15311), + }, + ), + diagnostic.NewHop( + uint8(2), + "172.68.101.121 (172.68.101.121)", + []time.Duration{ + time.Duration(12874), + time.Duration(15517), + time.Duration(15311), + }, + ), + }, + }, + { + "missing rtt", + `1 172.68.101.121 (172.68.101.121) 12.874 ms 15.517 ms 15.311 ms +2 * 8.8.8.8 
8.8.8.9 0.456 ms 0.789 ms`, + []*diagnostic.Hop{ + diagnostic.NewHop( + uint8(1), + "172.68.101.121 (172.68.101.121)", + []time.Duration{ + time.Duration(12874), + time.Duration(15517), + time.Duration(15311), + }, + ), + diagnostic.NewHop( + uint8(2), + "8.8.8.8 8.8.8.9", + []time.Duration{ + time.Duration(456), + time.Duration(789), + }, + ), + }, + }, + { + "simple example ipv4", + `1 172.68.101.121 (172.68.101.121) 12.874 ms 15.517 ms 15.311 ms +2 172.68.101.121 (172.68.101.121) 12.874 ms 15.517 ms 15.311 ms +3 * * *`, + []*diagnostic.Hop{ + diagnostic.NewHop( + uint8(1), + "172.68.101.121 (172.68.101.121)", + []time.Duration{ + time.Duration(12874), + time.Duration(15517), + time.Duration(15311), + }, + ), + diagnostic.NewHop( + uint8(2), + "172.68.101.121 (172.68.101.121)", + []time.Duration{ + time.Duration(12874), + time.Duration(15517), + time.Duration(15311), + }, + ), + diagnostic.NewTimeoutHop(uint8(3)), + }, + }, + { + "simple example ipv6", + ` 1 2400:cb00:107:1024::ac44:6550 12.780 ms 9.118 ms 10.046 ms + 2 2a09:bac1:: 9.945 ms 10.033 ms 11.562 ms`, + []*diagnostic.Hop{ + diagnostic.NewHop( + uint8(1), + "2400:cb00:107:1024::ac44:6550", + []time.Duration{ + time.Duration(12780), + time.Duration(9118), + time.Duration(10046), + }, + ), + diagnostic.NewHop( + uint8(2), + "2a09:bac1::", + []time.Duration{ + time.Duration(9945), + time.Duration(10033), + time.Duration(11562), + }, + ), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + hops, err := diagnostic.Decode(strings.NewReader(test.text), diagnostic.DecodeLine) + require.NoError(t, err) + assert.Equal(t, test.expectedHops, hops) + }) + } +} diff --git a/diagnostic/network/collector_utils.go b/diagnostic/network/collector_utils.go new file mode 100644 index 00000000..bfc27849 --- /dev/null +++ b/diagnostic/network/collector_utils.go @@ -0,0 +1,74 @@ +package diagnostic + +import ( + "bufio" + "bytes" + "fmt" + "io" + "os/exec" +) + +type 
DecodeLineFunc func(text string) (*Hop, error) + +func decodeNetworkOutputToFile(command *exec.Cmd, decodeLine DecodeLineFunc) ([]*Hop, string, error) { + stdout, err := command.StdoutPipe() + if err != nil { + return nil, "", fmt.Errorf("error piping traceroute's output: %w", err) + } + + if err := command.Start(); err != nil { + return nil, "", fmt.Errorf("error starting traceroute: %w", err) + } + + // Tee the output to a string to have the raw information + // in case the decode call fails + // This error is handled only after the Wait call below returns + // otherwise the process can become a zombie + buf := bytes.NewBuffer([]byte{}) + tee := io.TeeReader(stdout, buf) + hops, err := Decode(tee, decodeLine) + // regardless of success of the decoding + // consume all output to have available in buf + _, _ = io.ReadAll(tee) + + if werr := command.Wait(); werr != nil { + return nil, "", fmt.Errorf("error finishing traceroute: %w", werr) + } + + if err != nil { + return nil, buf.String(), err + } + + return hops, buf.String(), nil +} + +func Decode(reader io.Reader, decodeLine DecodeLineFunc) ([]*Hop, error) { + scanner := bufio.NewScanner(reader) + scanner.Split(bufio.ScanLines) + + var hops []*Hop + + for scanner.Scan() { + text := scanner.Text() + if text == "" { + continue + } + + hop, err := decodeLine(text) + if err != nil { + // This continue is here on the error case because there are lines at the start and end + // that may not be parsable. (check windows tracert output) + // The skip is here because aside from the start and end lines the other lines should + // always be parsable without errors. 
+ continue + } + + hops = append(hops, hop) + } + + if scanner.Err() != nil { + return nil, fmt.Errorf("scanner reported an error: %w", scanner.Err()) + } + + return hops, nil +} diff --git a/diagnostic/network/collector_windows.go b/diagnostic/network/collector_windows.go new file mode 100644 index 00000000..fe91a9de --- /dev/null +++ b/diagnostic/network/collector_windows.go @@ -0,0 +1,81 @@ +//go:build windows + +package diagnostic + +import ( + "context" + "fmt" + "os/exec" + "strconv" + "strings" + "time" +) + +type NetworkCollectorImpl struct{} + +func (tracer *NetworkCollectorImpl) Collect(ctx context.Context, options TraceOptions) ([]*Hop, string, error) { + ipversion := "-4" + if !options.useV4 { + ipversion = "-6" + } + + args := []string{ + ipversion, + "-w", + strconv.FormatInt(int64(options.timeout.Seconds()), 10), + "-h", + strconv.FormatUint(options.ttl, 10), + // Do not resolve host names (can add 30+ seconds to run time) + "-d", + options.address, + } + command := exec.CommandContext(ctx, "tracert.exe", args...) + + return decodeNetworkOutputToFile(command, DecodeLine) +} + +func DecodeLine(text string) (*Hop, error) { + const requestTimedOut = "Request timed out." + + fields := strings.Fields(text) + parts := []string{} + filter := func(s string) bool { return s != "*" && s != "ms" } + + for _, field := range fields { + if filter(field) { + parts = append(parts, field) + } + } + + index, err := strconv.ParseUint(parts[0], 10, 8) + if err != nil { + return nil, fmt.Errorf("couldn't parse index from timeout hop: %w", err) + } + + domain := "" + rtts := []time.Duration{} + + for _, part := range parts[1:] { + + rtt, err := strconv.ParseFloat(strings.TrimLeft(part, "<"), 64) + + if err != nil { + domain += part + " " + } else { + rtts = append(rtts, time.Duration(rtt*MicrosecondsFactor)) + } + } + + domain, _ = strings.CutSuffix(domain, " ") + // If the domain is equal to "Request timed out." then we build a + // timeout hop. 
+ if domain == requestTimedOut { + return NewTimeoutHop(uint8(index)), nil + } + + if domain == "" { + return nil, ErrEmptyDomain + } + + return NewHop(uint8(index), domain, rtts), nil +} diff --git a/diagnostic/network/collector_windows_test.go b/diagnostic/network/collector_windows_test.go new file mode 100644 index 00000000..3338a8bc --- /dev/null +++ b/diagnostic/network/collector_windows_test.go @@ -0,0 +1,210 @@ +//go:build windows + +package diagnostic_test + +import ( + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + diagnostic "github.com/cloudflare/cloudflared/diagnostic/network" +) + +func TestDecode(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + text string + expectedHops []*diagnostic.Hop + }{ + + { + "tracert output", + ` +Tracing route to region2.v2.argotunnel.com [198.41.200.73] +over a maximum of 5 hops: + + 1 10 ms <1 ms 1 ms 192.168.64.1 + 2 27 ms 14 ms 5 ms 192.168.1.254 + 3 * * * Request timed out. + 4 * * * Request timed out. + 5 27 ms 5 ms 5 ms 195.8.30.245 + +Trace complete. 
+`, + []*diagnostic.Hop{ + diagnostic.NewHop( + uint8(1), + "192.168.64.1", + []time.Duration{ + time.Duration(10000), + time.Duration(1000), + time.Duration(1000), + }, + ), + diagnostic.NewHop( + uint8(2), + "192.168.1.254", + []time.Duration{ + time.Duration(27000), + time.Duration(14000), + time.Duration(5000), + }, + ), + diagnostic.NewTimeoutHop(uint8(3)), + diagnostic.NewTimeoutHop(uint8(4)), + diagnostic.NewHop( + uint8(5), + "195.8.30.245", + []time.Duration{ + time.Duration(27000), + time.Duration(5000), + time.Duration(5000), + }, + ), + }, + }, + { + "repeated hop index parse failure", + `1 12.874 ms 15.517 ms 15.311 ms 172.68.101.121 (172.68.101.121) +2 12.874 ms 15.517 ms 15.311 ms 172.68.101.121 (172.68.101.121) +someletters * * *`, + []*diagnostic.Hop{ + diagnostic.NewHop( + uint8(1), + "172.68.101.121 (172.68.101.121)", + []time.Duration{ + time.Duration(12874), + time.Duration(15517), + time.Duration(15311), + }, + ), + diagnostic.NewHop( + uint8(2), + "172.68.101.121 (172.68.101.121)", + []time.Duration{ + time.Duration(12874), + time.Duration(15517), + time.Duration(15311), + }, + ), + }, + }, + { + "hop index parse failure", + `1 12.874 ms 15.517 ms 15.311 ms 172.68.101.121 (172.68.101.121) +2 12.874 ms 15.517 ms 15.311 ms 172.68.101.121 (172.68.101.121) +someletters abc ms 0.456 ms 0.789 ms 8.8.8.8 8.8.8.9`, + []*diagnostic.Hop{ + diagnostic.NewHop( + uint8(1), + "172.68.101.121 (172.68.101.121)", + []time.Duration{ + time.Duration(12874), + time.Duration(15517), + time.Duration(15311), + }, + ), + diagnostic.NewHop( + uint8(2), + "172.68.101.121 (172.68.101.121)", + []time.Duration{ + time.Duration(12874), + time.Duration(15517), + time.Duration(15311), + }, + ), + }, + }, + { + "missing rtt", + `1 <12.874 ms <15.517 ms <15.311 ms 172.68.101.121 (172.68.101.121) +2 * 0.456 ms 0.789 ms 8.8.8.8 8.8.8.9`, + []*diagnostic.Hop{ + diagnostic.NewHop( + uint8(1), + "172.68.101.121 (172.68.101.121)", + []time.Duration{ + time.Duration(12874), + 
time.Duration(15517), + time.Duration(15311), + }, + ), + diagnostic.NewHop( + uint8(2), + "8.8.8.8 8.8.8.9", + []time.Duration{ + time.Duration(456), + time.Duration(789), + }, + ), + }, + }, + { + "simple example ipv4", + `1 12.874 ms 15.517 ms 15.311 ms 172.68.101.121 (172.68.101.121) +2 12.874 ms 15.517 ms 15.311 ms 172.68.101.121 (172.68.101.121) +3 * * * Request timed out.`, + []*diagnostic.Hop{ + diagnostic.NewHop( + uint8(1), + "172.68.101.121 (172.68.101.121)", + []time.Duration{ + time.Duration(12874), + time.Duration(15517), + time.Duration(15311), + }, + ), + diagnostic.NewHop( + uint8(2), + "172.68.101.121 (172.68.101.121)", + []time.Duration{ + time.Duration(12874), + time.Duration(15517), + time.Duration(15311), + }, + ), + diagnostic.NewTimeoutHop(uint8(3)), + }, + }, + { + "simple example ipv6", + ` 1 12.780 ms 9.118 ms 10.046 ms 2400:cb00:107:1024::ac44:6550 + 2 9.945 ms 10.033 ms 11.562 ms 2a09:bac1::`, + []*diagnostic.Hop{ + diagnostic.NewHop( + uint8(1), + "2400:cb00:107:1024::ac44:6550", + []time.Duration{ + time.Duration(12780), + time.Duration(9118), + time.Duration(10046), + }, + ), + diagnostic.NewHop( + uint8(2), + "2a09:bac1::", + []time.Duration{ + time.Duration(9945), + time.Duration(10033), + time.Duration(11562), + }, + ), + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + t.Parallel() + + hops, err := diagnostic.Decode(strings.NewReader(test.text), diagnostic.DecodeLine) + require.NoError(t, err) + assert.Equal(t, test.expectedHops, hops) + }) + } +} diff --git a/diagnostic/system_collector.go b/diagnostic/system_collector.go new file mode 100644 index 00000000..96e51231 --- /dev/null +++ b/diagnostic/system_collector.go @@ -0,0 +1,150 @@ +package diagnostic + +import ( + "context" + "encoding/json" + "errors" + "strings" +) + +type SystemInformationError struct { + Err error `json:"error"` + RawInfo string `json:"rawInfo"` +} + +func (err SystemInformationError) Error() string { + return 
err.Err.Error() +} + +func (err SystemInformationError) MarshalJSON() ([]byte, error) { + s := map[string]string{ + "error": err.Err.Error(), + "rawInfo": err.RawInfo, + } + + return json.Marshal(s) +} + +type SystemInformationGeneralError struct { + OperatingSystemInformationError error + MemoryInformationError error + FileDescriptorsInformationError error + DiskVolumeInformationError error +} + +func (err SystemInformationGeneralError) Error() string { + builder := &strings.Builder{} + builder.WriteString("errors found:") + + if err.OperatingSystemInformationError != nil { + builder.WriteString(err.OperatingSystemInformationError.Error() + ", ") + } + + if err.MemoryInformationError != nil { + builder.WriteString(err.MemoryInformationError.Error() + ", ") + } + + if err.FileDescriptorsInformationError != nil { + builder.WriteString(err.FileDescriptorsInformationError.Error() + ", ") + } + + if err.DiskVolumeInformationError != nil { + builder.WriteString(err.DiskVolumeInformationError.Error() + ", ") + } + + return builder.String() +} + +func (err SystemInformationGeneralError) MarshalJSON() ([]byte, error) { + data := map[string]SystemInformationError{} + + var sysErr SystemInformationError + if errors.As(err.OperatingSystemInformationError, &sysErr) { + data["operatingSystemInformationError"] = sysErr + } + + if errors.As(err.MemoryInformationError, &sysErr) { + data["memoryInformationError"] = sysErr + } + + if errors.As(err.FileDescriptorsInformationError, &sysErr) { + data["fileDescriptorsInformationError"] = sysErr + } + + if errors.As(err.DiskVolumeInformationError, &sysErr) { + data["diskVolumeInformationError"] = sysErr + } + + return json.Marshal(data) +} + +type DiskVolumeInformation struct { + Name string `json:"name"` // represents the filesystem in linux/macos or device name in windows + SizeMaximum uint64 `json:"sizeMaximum"` // represents the maximum size of the disk in kilobytes + SizeCurrent uint64 `json:"sizeCurrent"` // represents the current 
size of the disk in kilobytes +} + +func NewDiskVolumeInformation(name string, maximum, current uint64) *DiskVolumeInformation { + return &DiskVolumeInformation{ + name, + maximum, + current, + } +} + +type SystemInformation struct { + MemoryMaximum uint64 `json:"memoryMaximum,omitempty"` // represents the maximum memory of the system in kilobytes + MemoryCurrent uint64 `json:"memoryCurrent,omitempty"` // represents the system's memory in use in kilobytes + FileDescriptorMaximum uint64 `json:"fileDescriptorMaximum,omitempty"` // represents the maximum number of file descriptors of the system + FileDescriptorCurrent uint64 `json:"fileDescriptorCurrent,omitempty"` // represents the system's file descriptors in use + OsSystem string `json:"osSystem,omitempty"` // represents the operating system name i.e.: linux, windows, darwin + HostName string `json:"hostName,omitempty"` // represents the system host name + OsVersion string `json:"osVersion,omitempty"` // detailed information about the system's release version level + OsRelease string `json:"osRelease,omitempty"` // detailed information about the system's release + Architecture string `json:"architecture,omitempty"` // represents the system's hardware platform i.e: arm64/amd64 + CloudflaredVersion string `json:"cloudflaredVersion,omitempty"` // the runtime version of cloudflared + GoVersion string `json:"goVersion,omitempty"` + GoArch string `json:"goArch,omitempty"` + Disk []*DiskVolumeInformation `json:"disk,omitempty"` +} + +func NewSystemInformation( + memoryMaximum, + memoryCurrent, + filesMaximum, + filesCurrent uint64, + osystem, + name, + osVersion, + osRelease, + architecture, + cloudflaredVersion, + goVersion, + goArchitecture string, + disk []*DiskVolumeInformation, +) *SystemInformation { + return &SystemInformation{ + memoryMaximum, + memoryCurrent, + filesMaximum, + filesCurrent, + osystem, + name, + osVersion, + osRelease, + architecture, + cloudflaredVersion, + goVersion, + goArchitecture, + disk, + 
} +} + +type SystemCollector interface { + // If the collection is successful it will return `SystemInformation` struct, + // and a nil error. + // + // This function expects that the caller sets the context timeout to prevent + // long-lived collectors. + Collect(ctx context.Context) (*SystemInformation, error) +} diff --git a/diagnostic/system_collector_linux.go b/diagnostic/system_collector_linux.go new file mode 100644 index 00000000..49c0e6c2 --- /dev/null +++ b/diagnostic/system_collector_linux.go @@ -0,0 +1,150 @@ +//go:build linux + +package diagnostic + +import ( + "context" + "fmt" + "os/exec" + "runtime" + "strconv" + "strings" +) + +type SystemCollectorImpl struct { + version string +} + +func NewSystemCollectorImpl( + version string, +) *SystemCollectorImpl { + return &SystemCollectorImpl{ + version, + } +} + +func (collector *SystemCollectorImpl) Collect(ctx context.Context) (*SystemInformation, error) { + memoryInfo, memoryInfoRaw, memoryInfoErr := collectMemoryInformation(ctx) + fdInfo, fdInfoRaw, fdInfoErr := collectFileDescriptorInformation(ctx) + disks, disksRaw, diskErr := collectDiskVolumeInformationUnix(ctx) + osInfo, osInfoRaw, osInfoErr := collectOSInformationUnix(ctx) + + var memoryMaximum, memoryCurrent, fileDescriptorMaximum, fileDescriptorCurrent uint64 + var osSystem, name, osVersion, osRelease, architecture string + gerror := SystemInformationGeneralError{} + + if memoryInfoErr != nil { + gerror.MemoryInformationError = SystemInformationError{ + Err: memoryInfoErr, + RawInfo: memoryInfoRaw, + } + } else { + memoryMaximum = memoryInfo.MemoryMaximum + memoryCurrent = memoryInfo.MemoryCurrent + } + + if fdInfoErr != nil { + gerror.FileDescriptorsInformationError = SystemInformationError{ + Err: fdInfoErr, + RawInfo: fdInfoRaw, + } + } else { + fileDescriptorMaximum = fdInfo.FileDescriptorMaximum + fileDescriptorCurrent = fdInfo.FileDescriptorCurrent + } + + if diskErr != nil { + gerror.DiskVolumeInformationError = SystemInformationError{ 
+ Err: diskErr, + RawInfo: disksRaw, + } + } + + if osInfoErr != nil { + gerror.OperatingSystemInformationError = SystemInformationError{ + Err: osInfoErr, + RawInfo: osInfoRaw, + } + } else { + osSystem = osInfo.OsSystem + name = osInfo.Name + osVersion = osInfo.OsVersion + osRelease = osInfo.OsRelease + architecture = osInfo.Architecture + } + + cloudflaredVersion := collector.version + info := NewSystemInformation( + memoryMaximum, + memoryCurrent, + fileDescriptorMaximum, + fileDescriptorCurrent, + osSystem, + name, + osVersion, + osRelease, + architecture, + cloudflaredVersion, + runtime.Version(), + runtime.GOARCH, + disks, + ) + + return info, gerror +} + +func collectMemoryInformation(ctx context.Context) (*MemoryInformation, string, error) { + // This function relies on the output of `cat /proc/meminfo` to retrieve + // memoryMax and memoryCurrent. + // The expected output is in the format of `KEY VALUE UNIT`. + const ( + memTotalPrefix = "MemTotal" + memAvailablePrefix = "MemAvailable" + ) + + command := exec.CommandContext(ctx, "cat", "/proc/meminfo") + + stdout, err := command.Output() + if err != nil { + return nil, "", fmt.Errorf("error retrieving output from command '%s': %w", command.String(), err) + } + + output := string(stdout) + + mapper := func(field string) (uint64, error) { + field = strings.TrimRight(field, " kB") + + return strconv.ParseUint(field, 10, 64) + } + + memoryInfo, err := ParseMemoryInformationFromKV(output, memTotalPrefix, memAvailablePrefix, mapper) + if err != nil { + return nil, output, err + } + + // returning raw output in case other collected information + // resulted in errors + return memoryInfo, output, nil +} + +func collectFileDescriptorInformation(ctx context.Context) (*FileDescriptorInformation, string, error) { + // Command retrieved from https://docs.kernel.org/admin-guide/sysctl/fs.html#file-max-file-nr. + // If the sysctl is not available the command with fail. 
+ command := exec.CommandContext(ctx, "sysctl", "-n", "fs.file-nr") + + stdout, err := command.Output() + if err != nil { + return nil, "", fmt.Errorf("error retrieving output from command '%s': %w", command.String(), err) + } + + output := string(stdout) + + fileDescriptorInfo, err := ParseSysctlFileDescriptorInformation(output) + if err != nil { + return nil, output, err + } + + // returning raw output in case other collected information + // resulted in errors + return fileDescriptorInfo, output, nil +} diff --git a/diagnostic/system_collector_macos.go b/diagnostic/system_collector_macos.go new file mode 100644 index 00000000..2f5ac740 --- /dev/null +++ b/diagnostic/system_collector_macos.go @@ -0,0 +1,172 @@ +//go:build darwin + +package diagnostic + +import ( + "context" + "fmt" + "os/exec" + "runtime" + "strconv" +) + +type SystemCollectorImpl struct { + version string +} + +func NewSystemCollectorImpl( + version string, +) *SystemCollectorImpl { + return &SystemCollectorImpl{ + version, + } +} + +func (collector *SystemCollectorImpl) Collect(ctx context.Context) (*SystemInformation, error) { + memoryInfo, memoryInfoRaw, memoryInfoErr := collectMemoryInformation(ctx) + fdInfo, fdInfoRaw, fdInfoErr := collectFileDescriptorInformation(ctx) + disks, disksRaw, diskErr := collectDiskVolumeInformationUnix(ctx) + osInfo, osInfoRaw, osInfoErr := collectOSInformationUnix(ctx) + + var memoryMaximum, memoryCurrent, fileDescriptorMaximum, fileDescriptorCurrent uint64 + var osSystem, name, osVersion, osRelease, architecture string + + err := SystemInformationGeneralError{ + OperatingSystemInformationError: nil, + MemoryInformationError: nil, + FileDescriptorsInformationError: nil, + DiskVolumeInformationError: nil, + } + + if memoryInfoErr != nil { + err.MemoryInformationError = SystemInformationError{ + Err: memoryInfoErr, + RawInfo: memoryInfoRaw, + } + } else { + memoryMaximum = memoryInfo.MemoryMaximum + memoryCurrent = memoryInfo.MemoryCurrent + } + + if fdInfoErr != 
nil { + err.FileDescriptorsInformationError = SystemInformationError{ + Err: fdInfoErr, + RawInfo: fdInfoRaw, + } + } else { + fileDescriptorMaximum = fdInfo.FileDescriptorMaximum + fileDescriptorCurrent = fdInfo.FileDescriptorCurrent + } + + if diskErr != nil { + err.DiskVolumeInformationError = SystemInformationError{ + Err: diskErr, + RawInfo: disksRaw, + } + } + + if osInfoErr != nil { + err.OperatingSystemInformationError = SystemInformationError{ + Err: osInfoErr, + RawInfo: osInfoRaw, + } + } else { + osSystem = osInfo.OsSystem + name = osInfo.Name + osVersion = osInfo.OsVersion + osRelease = osInfo.OsRelease + architecture = osInfo.Architecture + } + + cloudflaredVersion := collector.version + info := NewSystemInformation( + memoryMaximum, + memoryCurrent, + fileDescriptorMaximum, + fileDescriptorCurrent, + osSystem, + name, + osVersion, + osRelease, + architecture, + cloudflaredVersion, + runtime.Version(), + runtime.GOARCH, + disks, + ) + + return info, err +} + +func collectFileDescriptorInformation(ctx context.Context) ( + *FileDescriptorInformation, + string, + error, +) { + const ( + fileDescriptorMaximumKey = "kern.maxfiles" + fileDescriptorCurrentKey = "kern.num_files" + ) + + command := exec.CommandContext(ctx, "sysctl", fileDescriptorMaximumKey, fileDescriptorCurrentKey) + + stdout, err := command.Output() + if err != nil { + return nil, "", fmt.Errorf("error retrieving output from command '%s': %w", command.String(), err) + } + + output := string(stdout) + + fileDescriptorInfo, err := ParseFileDescriptorInformationFromKV( + output, + fileDescriptorMaximumKey, + fileDescriptorCurrentKey, + ) + if err != nil { + return nil, output, err + } + + // returning raw output in case other collected information + // resulted in errors + return fileDescriptorInfo, output, nil +} + +func collectMemoryInformation(ctx context.Context) ( + *MemoryInformation, + string, + error, +) { + const ( + memoryMaximumKey = "hw.memsize" + memoryAvailableKey = 
"hw.memsize_usable" + ) + + command := exec.CommandContext( + ctx, + "sysctl", + memoryMaximumKey, + memoryAvailableKey, + ) + + stdout, err := command.Output() + if err != nil { + return nil, "", fmt.Errorf("error retrieving output from command '%s': %w", command.String(), err) + } + + output := string(stdout) + + mapper := func(field string) (uint64, error) { + const kiloBytes = 1024 + value, err := strconv.ParseUint(field, 10, 64) + return value / kiloBytes, err + } + + memoryInfo, err := ParseMemoryInformationFromKV(output, memoryMaximumKey, memoryAvailableKey, mapper) + if err != nil { + return nil, output, err + } + + // returning raw output in case other collected information + // resulted in errors + return memoryInfo, output, nil +} diff --git a/diagnostic/system_collector_test.go b/diagnostic/system_collector_test.go new file mode 100644 index 00000000..c9338740 --- /dev/null +++ b/diagnostic/system_collector_test.go @@ -0,0 +1,466 @@ +package diagnostic_test + +import ( + "strconv" + "strings" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/cloudflare/cloudflared/diagnostic" +) + +func TestParseMemoryInformationFromKV(t *testing.T) { + t.Parallel() + + mapper := func(field string) (uint64, error) { + value, err := strconv.ParseUint(field, 10, 64) + return value, err + } + + linuxMapper := func(field string) (uint64, error) { + field = strings.TrimRight(field, " kB") + return strconv.ParseUint(field, 10, 64) + } + + windowsMemoryOutput := ` + +FreeVirtualMemory : 5350472 +TotalVirtualMemorySize : 8903424 + + +` + macosMemoryOutput := `hw.memsize: 38654705664 +hw.memsize_usable: 38009012224` + memoryOutputWithMissingKey := `hw.memsize: 38654705664` + + linuxMemoryOutput := `MemTotal: 8028860 kB +MemFree: 731396 kB +MemAvailable: 4678844 kB +Buffers: 472632 kB +Cached: 3186492 kB +SwapCached: 4196 kB +Active: 3088988 kB +Inactive: 3468560 kB` + + tests := []struct { + name string + output string + 
memoryMaximumKey string + memoryAvailableKey string + expected *diagnostic.MemoryInformation + expectedErr bool + mapper func(string) (uint64, error) + }{ + { + name: "parse linux memory values", + output: linuxMemoryOutput, + memoryMaximumKey: "MemTotal", + memoryAvailableKey: "MemAvailable", + expected: &diagnostic.MemoryInformation{ + 8028860, + 8028860 - 4678844, + }, + expectedErr: false, + mapper: linuxMapper, + }, + { + name: "parse memory values with missing key", + output: memoryOutputWithMissingKey, + memoryMaximumKey: "hw.memsize", + memoryAvailableKey: "hw.memsize_usable", + expected: nil, + expectedErr: true, + mapper: mapper, + }, + { + name: "parse macos memory values", + output: macosMemoryOutput, + memoryMaximumKey: "hw.memsize", + memoryAvailableKey: "hw.memsize_usable", + expected: &diagnostic.MemoryInformation{ + 38654705664, + 38654705664 - 38009012224, + }, + expectedErr: false, + mapper: mapper, + }, + { + name: "parse windows memory values", + output: windowsMemoryOutput, + memoryMaximumKey: "TotalVirtualMemorySize", + memoryAvailableKey: "FreeVirtualMemory", + expected: &diagnostic.MemoryInformation{ + 8903424, + 8903424 - 5350472, + }, + expectedErr: false, + mapper: mapper, + }, + } + + for _, tCase := range tests { + t.Run(tCase.name, func(t *testing.T) { + t.Parallel() + memoryInfo, err := diagnostic.ParseMemoryInformationFromKV( + tCase.output, + tCase.memoryMaximumKey, + tCase.memoryAvailableKey, + tCase.mapper, + ) + + if tCase.expectedErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tCase.expected, memoryInfo) + } + }) + } +} + +func TestParseUnameOutput(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + output string + os string + expected *diagnostic.OsInfo + expectedErr bool + }{ + { + name: "darwin machine", + output: "Darwin APC 23.6.0 Darwin Kernel Version 99.6.0: Wed Jul 31 20:48:04 PDT 1997; root:xnu-66666.666.6.666.6~1/RELEASE_ARM64_T6666 arm64", + os: "darwin", + 
expected: &diagnostic.OsInfo{ + Architecture: "arm64", + Name: "APC", + OsSystem: "Darwin", + OsRelease: "Darwin Kernel Version 99.6.0: Wed Jul 31 20:48:04 PDT 1997; root:xnu-66666.666.6.666.6~1/RELEASE_ARM64_T6666", + OsVersion: "23.6.0", + }, + expectedErr: false, + }, + { + name: "linux machine", + output: "Linux dab00d565591 6.6.31-linuxkit #1 SMP Thu May 23 08:36:57 UTC 2024 aarch64 GNU/Linux", + os: "linux", + expected: &diagnostic.OsInfo{ + Architecture: "aarch64", + Name: "dab00d565591", + OsSystem: "Linux", + OsRelease: "#1 SMP Thu May 23 08:36:57 UTC 2024", + OsVersion: "6.6.31-linuxkit", + }, + expectedErr: false, + }, + { + name: "not enough fields", + output: "Linux ", + os: "linux", + expected: nil, + expectedErr: true, + }, + } + + for _, tCase := range tests { + t.Run(tCase.name, func(t *testing.T) { + t.Parallel() + memoryInfo, err := diagnostic.ParseUnameOutput( + tCase.output, + tCase.os, + ) + + if tCase.expectedErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tCase.expected, memoryInfo) + } + }) + } +} + +func TestParseFileDescriptorInformationFromKV(t *testing.T) { + const ( + fileDescriptorMaximumKey = "kern.maxfiles" + fileDescriptorCurrentKey = "kern.num_files" + ) + + t.Parallel() + + memoryOutput := `kern.maxfiles: 276480 +kern.num_files: 11787` + memoryOutputWithMissingKey := `kern.maxfiles: 276480` + + tests := []struct { + name string + output string + expected *diagnostic.FileDescriptorInformation + expectedErr bool + }{ + { + name: "parse memory values with missing key", + output: memoryOutputWithMissingKey, + expected: nil, + expectedErr: true, + }, + { + name: "parse macos memory values", + output: memoryOutput, + expected: &diagnostic.FileDescriptorInformation{ + 276480, + 11787, + }, + expectedErr: false, + }, + } + + for _, tCase := range tests { + t.Run(tCase.name, func(t *testing.T) { + t.Parallel() + fdInfo, err := diagnostic.ParseFileDescriptorInformationFromKV( + tCase.output, + 
fileDescriptorMaximumKey, + fileDescriptorCurrentKey, + ) + + if tCase.expectedErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tCase.expected, fdInfo) + } + }) + } +} + +func TestParseSysctlFileDescriptorInformation(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + output string + expected *diagnostic.FileDescriptorInformation + expectedErr bool + }{ + { + name: "expected output", + output: "111 0 1111111", + expected: &diagnostic.FileDescriptorInformation{ + FileDescriptorMaximum: 1111111, + FileDescriptorCurrent: 111, + }, + expectedErr: false, + }, + { + name: "not enough fields", + output: "111 111 ", + expected: nil, + expectedErr: true, + }, + } + + for _, tCase := range tests { + t.Run(tCase.name, func(t *testing.T) { + t.Parallel() + fdsInfo, err := diagnostic.ParseSysctlFileDescriptorInformation( + tCase.output, + ) + + if tCase.expectedErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tCase.expected, fdsInfo) + } + }) + } +} + +func TestParseWinOperatingSystemInfo(t *testing.T) { + const ( + architecturePrefix = "OSArchitecture" + osSystemPrefix = "Caption" + osVersionPrefix = "Version" + osReleasePrefix = "BuildNumber" + namePrefix = "CSName" + ) + + t.Parallel() + + windowsIncompleteOsInfo := ` +OSArchitecture : ARM 64 bits +Caption : Microsoft Windows 11 Home +Morekeys : 121314 +CSName : UTILIZA-QO859QP +` + windowsCompleteOsInfo := ` +OSArchitecture : ARM 64 bits +Caption : Microsoft Windows 11 Home +Version : 10.0.22631 +BuildNumber : 22631 +Morekeys : 121314 +CSName : UTILIZA-QO859QP +` + + tests := []struct { + name string + output string + expected *diagnostic.OsInfo + expectedErr bool + }{ + { + name: "expected output", + output: windowsCompleteOsInfo, + expected: &diagnostic.OsInfo{ + Architecture: "ARM 64 bits", + Name: "UTILIZA-QO859QP", + OsSystem: "Microsoft Windows 11 Home", + OsRelease: "22631", + OsVersion: "10.0.22631", + }, + expectedErr: false, + 
}, + { + name: "missing keys", + output: windowsIncompleteOsInfo, + expected: nil, + expectedErr: true, + }, + } + + for _, tCase := range tests { + t.Run(tCase.name, func(t *testing.T) { + t.Parallel() + osInfo, err := diagnostic.ParseWinOperatingSystemInfo( + tCase.output, + architecturePrefix, + osSystemPrefix, + osVersionPrefix, + osReleasePrefix, + namePrefix, + ) + + if tCase.expectedErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tCase.expected, osInfo) + } + }) + } +} + +func TestParseDiskVolumeInformationOutput(t *testing.T) { + t.Parallel() + + invalidUnixDiskVolumeInfo := `Filesystem Size Used Avail Use% Mounted on +overlay 59G 19G 38G 33% / +tmpfs 64M 0 64M 0% /dev +shm 64M 0 64M 0% /dev/shm +/run/host_mark/Users 461G 266G 195G 58% /tmp/cloudflared +/dev/vda1 59G 19G 38G 33% /etc/hosts +tmpfs 3.9G 0 3.9G 0% /sys/firmware +` + + unixDiskVolumeInfo := `Filesystem Size Used Avail Use% Mounted on +overlay 61202244 18881444 39179476 33% / +tmpfs 65536 0 65536 0% /dev +shm 65536 0 65536 0% /dev/shm +/run/host_mark/Users 482797652 278648468 204149184 58% /tmp/cloudflared +/dev/vda1 61202244 18881444 39179476 33% /etc/hosts +tmpfs 4014428 0 4014428 0% /sys/firmware` + missingFields := ` DeviceID Size +-------- ---- +C: size +E: 235563008 +Z: 67754782720 +` + invalidTypeField := ` DeviceID Size FreeSpace +-------- ---- --------- +C: size 31318736896 +D: +E: 235563008 0 +Z: 67754782720 31318732800 +` + + windowsDiskVolumeInfo := ` + +DeviceID Size FreeSpace +-------- ---- --------- +C: 67754782720 31318736896 +E: 235563008 0 +Z: 67754782720 31318732800` + + tests := []struct { + name string + output string + expected []*diagnostic.DiskVolumeInformation + skipLines int + expectedErr bool + }{ + { + name: "invalid unix disk volume information (numbers have units)", + output: invalidUnixDiskVolumeInfo, + expected: []*diagnostic.DiskVolumeInformation{}, + skipLines: 1, + expectedErr: true, + }, + { + name: "unix disk volume 
information", + output: unixDiskVolumeInfo, + skipLines: 1, + expected: []*diagnostic.DiskVolumeInformation{ + diagnostic.NewDiskVolumeInformation("overlay", 61202244, 18881444), + diagnostic.NewDiskVolumeInformation("tmpfs", 65536, 0), + diagnostic.NewDiskVolumeInformation("shm", 65536, 0), + diagnostic.NewDiskVolumeInformation("/run/host_mark/Users", 482797652, 278648468), + diagnostic.NewDiskVolumeInformation("/dev/vda1", 61202244, 18881444), + diagnostic.NewDiskVolumeInformation("tmpfs", 4014428, 0), + }, + expectedErr: false, + }, + { + name: "windows disk volume information", + output: windowsDiskVolumeInfo, + expected: []*diagnostic.DiskVolumeInformation{ + diagnostic.NewDiskVolumeInformation("C:", 67754782720, 31318736896), + diagnostic.NewDiskVolumeInformation("E:", 235563008, 0), + diagnostic.NewDiskVolumeInformation("Z:", 67754782720, 31318732800), + }, + skipLines: 4, + expectedErr: false, + }, + { + name: "insuficient fields", + output: missingFields, + expected: nil, + skipLines: 2, + expectedErr: true, + }, + { + name: "invalid field", + output: invalidTypeField, + expected: nil, + skipLines: 2, + expectedErr: true, + }, + } + + for _, tCase := range tests { + t.Run(tCase.name, func(t *testing.T) { + t.Parallel() + disks, err := diagnostic.ParseDiskVolumeInformationOutput(tCase.output, tCase.skipLines, 1) + + if tCase.expectedErr { + assert.Error(t, err) + } else { + require.NoError(t, err) + assert.Equal(t, tCase.expected, disks) + } + }) + } +} diff --git a/diagnostic/system_collector_utils.go b/diagnostic/system_collector_utils.go new file mode 100644 index 00000000..2ea17209 --- /dev/null +++ b/diagnostic/system_collector_utils.go @@ -0,0 +1,377 @@ +package diagnostic + +import ( + "context" + "fmt" + "os/exec" + "runtime" + "sort" + "strconv" + "strings" +) + +func findColonSeparatedPairs[V any](output string, keys []string, mapper func(string) (V, error)) map[string]V { + const ( + memoryField = 1 + memoryInformationFields = 2 + ) + + lines := 
strings.Split(output, "\n") + pairs := make(map[string]V, 0) + + // sort keys and lines to allow incremental search + sort.Strings(lines) + sort.Strings(keys) + + // keeps track of the last key found + lastIndex := 0 + + for _, line := range lines { + if lastIndex == len(keys) { + // already found all keys no need to continue iterating + // over the other values + break + } + + for index, key := range keys[lastIndex:] { + line = strings.TrimSpace(line) + if strings.HasPrefix(line, key) { + fields := strings.Split(line, ":") + if len(fields) < memoryInformationFields { + lastIndex = index + 1 + + break + } + + field, err := mapper(strings.TrimSpace(fields[memoryField])) + if err != nil { + lastIndex = lastIndex + index + 1 + + break + } + + pairs[key] = field + lastIndex = lastIndex + index + 1 + + break + } + } + } + + return pairs +} + +func ParseDiskVolumeInformationOutput(output string, skipLines int, scale float64) ([]*DiskVolumeInformation, error) { + const ( + diskFieldsMinimum = 3 + nameField = 0 + sizeMaximumField = 1 + sizeCurrentField = 2 + ) + + disksRaw := strings.Split(output, "\n") + disks := make([]*DiskVolumeInformation, 0) + + if skipLines > len(disksRaw) || skipLines < 0 { + skipLines = 0 + } + + for _, disk := range disksRaw[skipLines:] { + if disk == "" { + // skip empty line + continue + } + + fields := strings.Fields(disk) + if len(fields) < diskFieldsMinimum { + return nil, fmt.Errorf("expected disk volume to have %d fields got %d: %w", + diskFieldsMinimum, len(fields), ErrInsuficientFields, + ) + } + + name := fields[nameField] + + sizeMaximum, err := strconv.ParseUint(fields[sizeMaximumField], 10, 64) + if err != nil { + continue + } + + sizeCurrent, err := strconv.ParseUint(fields[sizeCurrentField], 10, 64) + if err != nil { + continue + } + + diskInfo := NewDiskVolumeInformation( + name, uint64(float64(sizeMaximum)*scale), uint64(float64(sizeCurrent)*scale), + ) + disks = append(disks, diskInfo) + } + + if len(disks) == 0 { + return nil, 
ErrNoVolumeFound + } + + return disks, nil +} + +type OsInfo struct { + OsSystem string + Name string + OsVersion string + OsRelease string + Architecture string +} + +func ParseUnameOutput(output string, system string) (*OsInfo, error) { + const ( + osystemField = 0 + nameField = 1 + osVersionField = 2 + osReleaseStartField = 3 + osInformationFieldsMinimum = 6 + darwin = "darwin" + ) + + architectureOffset := 2 + if system == darwin { + architectureOffset = 1 + } + + fields := strings.Fields(output) + if len(fields) < osInformationFieldsMinimum { + return nil, fmt.Errorf("expected system information to have %d fields got %d: %w", + osInformationFieldsMinimum, len(fields), ErrInsuficientFields, + ) + } + + architectureField := len(fields) - architectureOffset + osystem := fields[osystemField] + name := fields[nameField] + osVersion := fields[osVersionField] + osRelease := strings.Join(fields[osReleaseStartField:architectureField], " ") + architecture := fields[architectureField] + + return &OsInfo{ + osystem, + name, + osVersion, + osRelease, + architecture, + }, nil +} + +func ParseWinOperatingSystemInfo( + output string, + architectureKey string, + osSystemKey string, + osVersionKey string, + osReleaseKey string, + nameKey string, +) (*OsInfo, error) { + identity := func(s string) (string, error) { return s, nil } + + keys := []string{architectureKey, osSystemKey, osVersionKey, osReleaseKey, nameKey} + pairs := findColonSeparatedPairs( + output, + keys, + identity, + ) + + architecture, exists := pairs[architectureKey] + if !exists { + return nil, fmt.Errorf("parsing os information: %w, key=%s", ErrKeyNotFound, architectureKey) + } + + osSystem, exists := pairs[osSystemKey] + if !exists { + return nil, fmt.Errorf("parsing os information: %w, key=%s", ErrKeyNotFound, osSystemKey) + } + + osVersion, exists := pairs[osVersionKey] + if !exists { + return nil, fmt.Errorf("parsing os information: %w, key=%s", ErrKeyNotFound, osVersionKey) + } + + osRelease, exists := 
pairs[osReleaseKey] + if !exists { + return nil, fmt.Errorf("parsing os information: %w, key=%s", ErrKeyNotFound, osReleaseKey) + } + + name, exists := pairs[nameKey] + if !exists { + return nil, fmt.Errorf("parsing os information: %w, key=%s", ErrKeyNotFound, nameKey) + } + + return &OsInfo{osSystem, name, osVersion, osRelease, architecture}, nil +} + +type FileDescriptorInformation struct { + FileDescriptorMaximum uint64 + FileDescriptorCurrent uint64 +} + +func ParseSysctlFileDescriptorInformation(output string) (*FileDescriptorInformation, error) { + const ( + openFilesField = 0 + maxFilesField = 2 + fileDescriptorLimitsFields = 3 + ) + + fields := strings.Fields(output) + + if len(fields) != fileDescriptorLimitsFields { + return nil, + fmt.Errorf( + "expected file descriptor information to have %d fields got %d: %w", + fileDescriptorLimitsFields, + len(fields), + ErrInsuficientFields, + ) + } + + fileDescriptorCurrent, err := strconv.ParseUint(fields[openFilesField], 10, 64) + if err != nil { + return nil, fmt.Errorf( + "error parsing files current field '%s': %w", + fields[openFilesField], + err, + ) + } + + fileDescriptorMaximum, err := strconv.ParseUint(fields[maxFilesField], 10, 64) + if err != nil { + return nil, fmt.Errorf("error parsing files max field '%s': %w", fields[maxFilesField], err) + } + + return &FileDescriptorInformation{fileDescriptorMaximum, fileDescriptorCurrent}, nil +} + +func ParseFileDescriptorInformationFromKV( + output string, + fileDescriptorMaximumKey string, + fileDescriptorCurrentKey string, +) (*FileDescriptorInformation, error) { + mapper := func(field string) (uint64, error) { + return strconv.ParseUint(field, 10, 64) + } + + pairs := findColonSeparatedPairs(output, []string{fileDescriptorMaximumKey, fileDescriptorCurrentKey}, mapper) + + fileDescriptorMaximum, exists := pairs[fileDescriptorMaximumKey] + if !exists { + return nil, fmt.Errorf( + "parsing file descriptor information: %w, key=%s", + ErrKeyNotFound, + 
fileDescriptorMaximumKey, + ) + } + + fileDescriptorCurrent, exists := pairs[fileDescriptorCurrentKey] + if !exists { + return nil, fmt.Errorf( + "parsing file descriptor information: %w, key=%s", + ErrKeyNotFound, + fileDescriptorCurrentKey, + ) + } + + return &FileDescriptorInformation{fileDescriptorMaximum, fileDescriptorCurrent}, nil +} + +type MemoryInformation struct { + MemoryMaximum uint64 // size in KB + MemoryCurrent uint64 // size in KB +} + +func ParseMemoryInformationFromKV( + output string, + memoryMaximumKey string, + memoryAvailableKey string, + mapper func(field string) (uint64, error), +) (*MemoryInformation, error) { + pairs := findColonSeparatedPairs(output, []string{memoryMaximumKey, memoryAvailableKey}, mapper) + + memoryMaximum, exists := pairs[memoryMaximumKey] + if !exists { + return nil, fmt.Errorf("parsing memory information: %w, key=%s", ErrKeyNotFound, memoryMaximumKey) + } + + memoryAvailable, exists := pairs[memoryAvailableKey] + if !exists { + return nil, fmt.Errorf("parsing memory information: %w, key=%s", ErrKeyNotFound, memoryAvailableKey) + } + + memoryCurrent := memoryMaximum - memoryAvailable + + return &MemoryInformation{memoryMaximum, memoryCurrent}, nil +} + +func RawSystemInformation(osInfoRaw string, memoryInfoRaw string, fdInfoRaw string, disksRaw string) string { + var builder strings.Builder + + formatInfo := func(info string, builder *strings.Builder) { + if info == "" { + builder.WriteString("No information\n") + } else { + builder.WriteString(info) + builder.WriteString("\n") + } + } + + builder.WriteString("---BEGIN Operating system information\n") + formatInfo(osInfoRaw, &builder) + builder.WriteString("---END Operating system information\n") + builder.WriteString("---BEGIN Memory information\n") + formatInfo(memoryInfoRaw, &builder) + builder.WriteString("---END Memory information\n") + builder.WriteString("---BEGIN File descriptors information\n") + formatInfo(fdInfoRaw, &builder) + builder.WriteString("---END 
File descriptors information\n") + builder.WriteString("---BEGIN Disks information\n") + formatInfo(disksRaw, &builder) + builder.WriteString("---END Disks information\n") + + rawInformation := builder.String() + + return rawInformation +} + +func collectDiskVolumeInformationUnix(ctx context.Context) ([]*DiskVolumeInformation, string, error) { + command := exec.CommandContext(ctx, "df", "-k") + + stdout, err := command.Output() + if err != nil { + return nil, "", fmt.Errorf("error retrieving output from command '%s': %w", command.String(), err) + } + + output := string(stdout) + + disks, err := ParseDiskVolumeInformationOutput(output, 1, 1) + if err != nil { + return nil, output, err + } + + // returning raw output in case other collected information + // resulted in errors + return disks, output, nil +} + +func collectOSInformationUnix(ctx context.Context) (*OsInfo, string, error) { + command := exec.CommandContext(ctx, "uname", "-a") + + stdout, err := command.Output() + if err != nil { + return nil, "", fmt.Errorf("error retrieving output from command '%s': %w", command.String(), err) + } + + output := string(stdout) + + osInfo, err := ParseUnameOutput(output, runtime.GOOS) + if err != nil { + return nil, output, err + } + + // returning raw output in case other collected information + // resulted in errors + return osInfo, output, nil +} diff --git a/diagnostic/system_collector_windows.go b/diagnostic/system_collector_windows.go new file mode 100644 index 00000000..41eaafcc --- /dev/null +++ b/diagnostic/system_collector_windows.go @@ -0,0 +1,183 @@ +//go:build windows + +package diagnostic + +import ( + "context" + "fmt" + "os/exec" + "runtime" + "strconv" +) + +const kiloBytesScale = 1.0 / 1024 + +type SystemCollectorImpl struct { + version string +} + +func NewSystemCollectorImpl( + version string, +) *SystemCollectorImpl { + return &SystemCollectorImpl{ + version, + } +} + +func (collector *SystemCollectorImpl) Collect(ctx context.Context) 
(*SystemInformation, error) { + memoryInfo, memoryInfoRaw, memoryInfoErr := collectMemoryInformation(ctx) + disks, disksRaw, diskErr := collectDiskVolumeInformation(ctx) + osInfo, osInfoRaw, osInfoErr := collectOSInformation(ctx) + + var memoryMaximum, memoryCurrent, fileDescriptorMaximum, fileDescriptorCurrent uint64 + var osSystem, name, osVersion, osRelease, architecture string + + err := SystemInformationGeneralError{ + OperatingSystemInformationError: nil, + MemoryInformationError: nil, + FileDescriptorsInformationError: nil, + DiskVolumeInformationError: nil, + } + + if memoryInfoErr != nil { + err.MemoryInformationError = SystemInformationError{ + Err: memoryInfoErr, + RawInfo: memoryInfoRaw, + } + } else { + memoryMaximum = memoryInfo.MemoryMaximum + memoryCurrent = memoryInfo.MemoryCurrent + } + + if diskErr != nil { + err.DiskVolumeInformationError = SystemInformationError{ + Err: diskErr, + RawInfo: disksRaw, + } + } + + if osInfoErr != nil { + err.OperatingSystemInformationError = SystemInformationError{ + Err: osInfoErr, + RawInfo: osInfoRaw, + } + } else { + osSystem = osInfo.OsSystem + name = osInfo.Name + osVersion = osInfo.OsVersion + osRelease = osInfo.OsRelease + architecture = osInfo.Architecture + } + + cloudflaredVersion := collector.version + info := NewSystemInformation( + memoryMaximum, + memoryCurrent, + fileDescriptorMaximum, + fileDescriptorCurrent, + osSystem, + name, + osVersion, + osRelease, + architecture, + cloudflaredVersion, + runtime.Version(), + runtime.GOARCH, + disks, + ) + + return info, err +} + +func collectMemoryInformation(ctx context.Context) (*MemoryInformation, string, error) { + const ( + memoryTotalPrefix = "TotalVirtualMemorySize" + memoryAvailablePrefix = "FreeVirtualMemory" + ) + + command := exec.CommandContext( + ctx, + "powershell", + "-Command", + "Get-CimInstance -Class Win32_OperatingSystem | Select-Object FreeVirtualMemory, TotalVirtualMemorySize | Format-List", + ) + + stdout, err := command.Output() + if 
err != nil { + return nil, "", fmt.Errorf("error retrieving output from command '%s': %w", command.String(), err) + } + + output := string(stdout) + + // the result of the command above will return values in bytes hence + // they need to be converted to kilobytes + mapper := func(field string) (uint64, error) { + value, err := strconv.ParseUint(field, 10, 64) + return uint64(float64(value) * kiloBytesScale), err + } + + memoryInfo, err := ParseMemoryInformationFromKV(output, memoryTotalPrefix, memoryAvailablePrefix, mapper) + if err != nil { + return nil, output, err + } + + // returning raw output in case other collected information + // resulted in errors + return memoryInfo, output, nil +} + +func collectDiskVolumeInformation(ctx context.Context) ([]*DiskVolumeInformation, string, error) { + + command := exec.CommandContext( + ctx, + "powershell", "-Command", "Get-CimInstance -Class Win32_LogicalDisk | Select-Object DeviceID, Size, FreeSpace") + + stdout, err := command.Output() + if err != nil { + return nil, "", fmt.Errorf("error retrieving output from command '%s': %w", command.String(), err) + } + + output := string(stdout) + + disks, err := ParseDiskVolumeInformationOutput(output, 2, kiloBytesScale) + if err != nil { + return nil, output, err + } + + // returning raw output in case other collected information + // resulted in errors + return disks, output, nil +} + +func collectOSInformation(ctx context.Context) (*OsInfo, string, error) { + const ( + architecturePrefix = "OSArchitecture" + osSystemPrefix = "Caption" + osVersionPrefix = "Version" + osReleasePrefix = "BuildNumber" + namePrefix = "CSName" + ) + + command := exec.CommandContext( + ctx, + "powershell", + "-Command", + "Get-CimInstance -Class Win32_OperatingSystem | Select-Object OSArchitecture, Caption, Version, BuildNumber, CSName | Format-List", + ) + + stdout, err := command.Output() + if err != nil { + return nil, "", fmt.Errorf("error retrieving output from command '%s': %w", 
command.String(), err) + } + + output := string(stdout) + + osInfo, err := ParseWinOperatingSystemInfo(output, architecturePrefix, osSystemPrefix, osVersionPrefix, osReleasePrefix, namePrefix) + if err != nil { + return nil, output, err + } + + // returning raw output in case other collected information + // resulted in errors + return osInfo, output, nil +} diff --git a/edgediscovery/dial.go b/edgediscovery/dial.go index 675e5dc5..1bbf59c3 100644 --- a/edgediscovery/dial.go +++ b/edgediscovery/dial.go @@ -9,7 +9,7 @@ import ( "github.com/pkg/errors" ) -// DialEdgeWithH2Mux makes a TLS connection to a Cloudflare edge node +// DialEdge makes a TLS connection to a Cloudflare edge node func DialEdge( ctx context.Context, timeout time.Duration, @@ -36,7 +36,7 @@ func DialEdge( if err = tlsEdgeConn.Handshake(); err != nil { return nil, newDialError(err, "TLS handshake with edge error") } - // clear the deadline on the conn; h2mux has its own timeouts + // clear the deadline on the conn; http2 has its own timeouts tlsEdgeConn.SetDeadline(time.Time{}) return tlsEdgeConn, nil } diff --git a/features/features.go b/features/features.go index 76f8ff8f..574f55ae 100644 --- a/features/features.go +++ b/features/features.go @@ -8,6 +8,7 @@ const ( FeaturePostQuantum = "postquantum" FeatureQUICSupportEOF = "support_quic_eof" FeatureManagementLogs = "management_logs" + FeatureDatagramV3 = "support_datagram_v3" ) var ( diff --git a/github_release.py b/github_release.py index 8773fc43..4620a139 100755 --- a/github_release.py +++ b/github_release.py @@ -11,8 +11,9 @@ import hashlib import requests import tarfile from os import listdir -from os.path import isfile, join +from os.path import isfile, join, splitext import re +import subprocess from github import Github, GithubException, UnknownObjectException @@ -210,6 +211,61 @@ def move_asset(filepath, filename): except shutil.SameFileError: pass # the macOS release copy fails with being the same file (already in the artifacts 
directory) +def get_binary_version(binary_path): + """ + Sample output from go version -m : + ... + build -compiler=gc + build -ldflags="-X \"main.Version=2024.8.3-6-gec072691\" -X \"main.BuildTime=2024-09-10-1027 UTC\" " + build CGO_ENABLED=1 + ... + + This function parses the above output to retrieve the following substring 2024.8.3-6-gec072691. + To do this a start and end indexes are computed and the a slice is extracted from the output using them. + """ + needle = "main.Version=" + cmd = ['go','version', '-m', binary_path] + process = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + output, _ = process.communicate() + version_info = output.decode() + + # Find start of needle + needle_index = version_info.find(needle) + # Find backward slash relative to the beggining of the needle + relative_end_index = version_info[needle_index:].find("\\") + # Calculate needle position plus needle length to find version beggining + start_index = needle_index + len(needle) + # Calculate needle position plus relative position of the backward slash + end_index = needle_index + relative_end_index + return version_info[start_index:end_index] + +def assert_asset_version(binary_path, release_version): + """ + Asserts that the artifacts have the correct release_version. + The artifacts that are checked must not have an extension expecting .exe and .tgz. + In the occurrence of any other extension the function exits early. 
+ """ + try: + shutil.rmtree('tmp') + except OSError: + pass + _, ext = os.path.splitext(binary_path) + if ext == '.exe' or ext == '': + binary_version = get_binary_version(binary_path) + elif ext == '.tgz': + tar = tarfile.open(binary_path, "r:gz") + tar.extractall("tmp") + tar.close() + binary_path = os.path.join(os.getcwd(), 'tmp', 'cloudflared') + binary_version = get_binary_version(binary_path) + else: + return + + if binary_version != release_version: + logging.error(f"Version mismatch {binary_path}, binary_version {binary_version} release_version {release_version}") + exit(1) + + def main(): """ Attempts to upload Asset to Github Release. Creates Release if it doesn't exist """ try: @@ -221,6 +277,7 @@ def main(): for filename in onlyfiles: binary_path = os.path.join(args.path, filename) logging.info("binary: " + binary_path) + assert_asset_version(binary_path, args.release_version) elif os.path.isfile(args.path): logging.info("binary: " + binary_path) else: @@ -229,18 +286,20 @@ def main(): else: client = Github(args.api_key) repo = client.get_repo(CLOUDFLARED_REPO) - release = get_or_create_release(repo, args.release_version, args.dry_run) if os.path.isdir(args.path): onlyfiles = [f for f in listdir(args.path) if isfile(join(args.path, f))] + for filename in onlyfiles: + binary_path = os.path.join(args.path, filename) + assert_asset_version(binary_path, args.release_version) + release = get_or_create_release(repo, args.release_version, args.dry_run) for filename in onlyfiles: binary_path = os.path.join(args.path, filename) upload_asset(release, binary_path, filename, args.release_version, args.kv_account_id, args.namespace_id, args.kv_api_token) move_asset(binary_path, filename) else: - upload_asset(release, args.path, args.name, args.release_version, args.kv_account_id, args.namespace_id, - args.kv_api_token) + raise Exception("the argument path must be a directory") except Exception as e: logging.exception(e) diff --git a/go.mod b/go.mod index 
b4aee5e1..e3b60fee 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/cloudflare/cloudflared go 1.22 require ( - github.com/coredns/coredns v1.10.0 + github.com/coredns/coredns v1.11.3 github.com/coreos/go-oidc/v3 v3.10.0 github.com/coreos/go-systemd/v22 v22.5.0 github.com/facebookgo/grace v0.0.0-20180706040059-75cf19382434 @@ -13,18 +13,17 @@ require ( github.com/go-chi/chi/v5 v5.0.8 github.com/go-chi/cors v1.2.1 github.com/go-jose/go-jose/v4 v4.0.1 - github.com/gobwas/ws v1.0.4 - github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 + github.com/gobwas/ws v1.2.1 github.com/google/gopacket v1.1.19 github.com/google/uuid v1.6.0 github.com/gorilla/websocket v1.4.2 github.com/json-iterator/go v1.1.12 github.com/mattn/go-colorable v0.1.13 - github.com/miekg/dns v1.1.50 + github.com/miekg/dns v1.1.58 github.com/mitchellh/go-homedir v1.1.0 github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.19.1 - github.com/prometheus/client_model v0.5.0 + github.com/prometheus/client_model v0.6.0 github.com/quic-go/quic-go v0.45.0 github.com/rs/zerolog v1.20.0 github.com/stretchr/testify v1.9.0 @@ -55,7 +54,7 @@ require ( github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/coredns/caddy v1.1.1 // indirect github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect github.com/facebookgo/freeport v0.0.0-20150612182905-d4adf43b75b9 // indirect github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect @@ -64,36 +63,36 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect - github.com/gobwas/httphead v0.0.0-20200921212729-da3d93bc3c58 // indirect + github.com/gobwas/httphead v0.1.0 // indirect 
github.com/gobwas/pool v0.2.1 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect + github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect github.com/klauspost/compress v1.15.11 // indirect github.com/kr/text v0.2.0 // indirect github.com/kylelemons/godebug v1.1.0 // indirect github.com/mattn/go-isatty v0.0.16 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect - github.com/onsi/ginkgo/v2 v2.9.5 // indirect + github.com/onsi/ginkgo/v2 v2.13.0 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/prometheus/common v0.48.0 // indirect + github.com/prometheus/common v0.53.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect go.opentelemetry.io/otel/metric v1.26.0 // indirect go.uber.org/mock v0.4.0 // indirect golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 // indirect golang.org/x/mod v0.17.0 // indirect - golang.org/x/oauth2 v0.17.0 // indirect + golang.org/x/oauth2 v0.18.0 // indirect golang.org/x/text v0.15.0 // indirect golang.org/x/tools v0.21.0 // indirect google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect - google.golang.org/grpc v1.63.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 // indirect + 
google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 // indirect + google.golang.org/grpc v1.63.2 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 70eb7d51..f16ded5b 100644 --- a/go.sum +++ b/go.sum @@ -7,13 +7,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= -github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= -github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0= github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4= -github.com/coredns/coredns v1.10.0 h1:jCfuWsBjTs0dapkkhISfPCzn5LqvSRtrFtaf/Tjj4DI= -github.com/coredns/coredns v1.10.0/go.mod h1:CIfRU5TgpuoIiJBJ4XrofQzfFQpPFh32ERpUevrSlaw= +github.com/coredns/coredns v1.11.3 h1:8RjnpZc42db5th84/QJKH2i137ecJdzZK1HJwhetSPk= +github.com/coredns/coredns v1.11.3/go.mod h1:lqFkDsHjEUdY7LJ75Nib3lwqJGip6ewWOqNIf8OavIQ= github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU= github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -24,8 +21,9 @@ github.com/cpuguy83/go-md2man/v2 v2.0.0 h1:EoUDS0afbrsXAZ9YQ9jdu/mZ2sXgT1/2yyNng github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.9/go.mod 
h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= github.com/facebookgo/freeport v0.0.0-20150612182905-d4adf43b75b9 h1:wWke/RUCl7VRjQhwPlR/v0glZXNYzBHdNUzf/Am2Nmg= @@ -75,19 +73,18 @@ github.com/go-playground/validator/v10 v10.11.1/go.mod h1:i+3WkQ1FvaUjjxh1kSvIA4 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/httphead v0.0.0-20200921212729-da3d93bc3c58 h1:YyrUZvJaU8Q0QsoVo+xLFBgWDTam29PKea6GYmwvSiQ= -github.com/gobwas/httphead v0.0.0-20200921212729-da3d93bc3c58/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= +github.com/gobwas/httphead v0.1.0 h1:exrUm0f4YX0L7EBwZHuCF4GDp8aJfVeBrlLQrs6NqWU= +github.com/gobwas/httphead v0.1.0/go.mod h1:O/RXo79gxV8G+RqlR/otEwx4Q36zl9rqC5u12GKvMCM= github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= github.com/gobwas/pool v0.2.1 h1:xfeeEhW7pwmX8nuLVlqbzVc7udMDrwetjEv+TZIz1og= github.com/gobwas/pool v0.2.1/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= 
github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= -github.com/gobwas/ws v1.0.4 h1:5eXU1CZhpQdq5kXbKb+sECH5Ia5KiO6CYzIzdlVx6Bs= -github.com/gobwas/ws v1.0.4/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= +github.com/gobwas/ws v1.2.1 h1:F2aeBZrm2NDsc7vbovKrWSogd4wvfAxg0FQ89/iqOTk= +github.com/gobwas/ws v1.2.1/go.mod h1:hRKAFb8wOxFROYNsT1bqfWnhX+b5MFeJM9r2ZSwg/KY= github.com/goccy/go-json v0.9.11 h1:/pAaQDLHEoCq/5FFmSKBswWmK6H0e8g4159Kc/X/nqk= github.com/goccy/go-json v0.9.11/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 h1:zN2lZNZRflqFyxVaTIU61KNKQ9C0055u9CAfpmqUvo4= -github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3/go.mod h1:nPpo7qLxd6XL3hWJG/O60sR8ZKfMCiIoNap5GvD12KU= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= @@ -102,8 +99,8 @@ github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeN github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8= github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b h1:h9U78+dx9a4BKdQkBBos92HalKpaGKHrp+3Uo6yTodo= +github.com/google/pprof 
v0.0.0-20230817174616-7a8ec2ada47b/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= @@ -114,7 +111,6 @@ github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0Q github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= -github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ipostelnik/cli/v2 v2.3.1-0.20210324024421-b6ea8234fe3d h1:PRDnysJ9dF1vUMmEzBu6aHQeUluSQy4eWH3RsSSy/vI= github.com/ipostelnik/cli/v2 v2.3.1-0.20210324024421-b6ea8234fe3d/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI= github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -140,10 +136,10 @@ github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovk github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA= -github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= 
+github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.1.58 h1:ca2Hdkz+cDg/7eNF6V56jjzuZ4aCAE+DbVkILdQWG/4= +github.com/miekg/dns v1.1.58/go.mod h1:Ypv+3b/KadlvW9vJfXOTf300O4UqaHFzFCuHz+rPkBY= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -152,16 +148,16 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= -github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= -github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= -github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= +github.com/onsi/ginkgo/v2 v2.13.0 h1:0jY9lJquiL8fcf3M4LAXN5aMlS/b2BV86HFFPCPMgE4= +github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xlT/ETL/o= +github.com/onsi/gomega v1.27.10 h1:naR28SdDFlqrG6kScpT8VWpu1xWY5nJRCF3XaYyBjhI= +github.com/onsi/gomega v1.27.10/go.mod h1:RsS8tutOdbdgzbPtzzATp12yT7kM5I5aElG3evPbQ0M= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/pelletier/go-toml/v2 v2.0.5 
h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg= github.com/pelletier/go-toml/v2 v2.0.5/go.mod h1:OMHamSCAODeSsVrwwvcJOaoN0LIUIaFVNZzmWyNfXas= -github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ= -github.com/philhofer/fwd v1.1.1/go.mod h1:gk3iGcWd9+svBvR0sR+KPcfE+RNWozjowpeBVG3ZVNU= +github.com/philhofer/fwd v1.1.2 h1:bnDivRJ1EWPjUIRXV5KfORO897HTbpFAQddBdE8t7Gw= +github.com/philhofer/fwd v1.1.2/go.mod h1:qkPdfjR2SIEbspLqpe1tO4n5yICnr2DY7mqEx2tUTP0= github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4= github.com/pingcap/errors v0.11.4/go.mod h1:Oi8TUi2kEtXXLMJk9l1cGmz20kV3TaQ0usTwv5KuLY8= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -171,10 +167,10 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_golang v1.19.1 h1:wZWJDwK+NameRJuPGDhlnFgx8e8HN3XHQeLaYJFJBOE= github.com/prometheus/client_golang v1.19.1/go.mod h1:mP78NwGzrVks5S2H6ab8+ZZGJLZUq1hoULYBAYBw1Ho= -github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw= -github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI= -github.com/prometheus/common v0.48.0 h1:QO8U2CdOzSn1BBsmXJXduaaW+dY/5QLjfB8svtSzKKE= -github.com/prometheus/common v0.48.0/go.mod h1:0/KsvlIEfPQCQ5I2iNSAWKPZziNCvRs5EC6ILDTlAPc= +github.com/prometheus/client_model v0.6.0 h1:k1v3CzpSRUTrKMppY35TLwPvxHqBu0bYgxZzqGIgaos= +github.com/prometheus/client_model v0.6.0/go.mod h1:NTQHnmxFpouOD0DpvP4XujX3CdOAGQPoaGhyTchlyt8= +github.com/prometheus/common v0.53.0 h1:U2pL9w9nmJwJDa4qqLQ3ZaePJ6ZTwt7cMD3AG3+aLCE= +github.com/prometheus/common v0.53.0/go.mod h1:BrxBKv3FWBIGXw89Mg1AeBq7FSyRzXWI3l3e7W3RN5U= github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo= github.com/prometheus/procfs v0.12.0/go.mod 
h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/quic-go/quic-go v0.45.0 h1:OHmkQGM37luZITyTSu6ff03HP/2IrwDX1ZFiNEhSFUE= @@ -195,14 +191,13 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ= -github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= +github.com/tinylib/msgp v1.1.8 h1:FCXC1xanKO4I8plpHGH2P7koL/RzZs12l/+r7vakfm0= +github.com/tinylib/msgp v1.1.8/go.mod h1:qkpG+2ldGg4xRFmx+jfTvZPxfGFhi64BcnL9vkCm/Tw= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0= github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY= -github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= go.opentelemetry.io/contrib/propagators v0.22.0 h1:KGdv58M2//veiYLIhb31mofaI2LgkIPXXAZVeYVyfd8= go.opentelemetry.io/contrib/propagators v0.22.0/go.mod h1:xGOuXr6lLIF9BXipA4pm6UuOSI0M98U6tsI3khbOiwU= @@ -233,39 +228,32 @@ golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842 h1:vr/HnozRka3pE4EsMEg1lgkXJ golang.org/x/exp v0.0.0-20240506185415-9bf2ced13842/go.mod h1:XtvwrStGgqGPLc4cjQfWqZHG1YFdYs6swckp8vpsjnc= golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mod 
v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= -golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ= -golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA= +golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= +golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= 
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ 
-275,7 +263,6 @@ golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= @@ -287,24 +274,20 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/tools v0.21.0 h1:qc0xYgIbsSDt9EyWz05J5wfa7LOVW0YTLOXrqdLAWIw= golang.org/x/tools v0.21.0/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= google.golang.org/appengine 
v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY= -google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo= -google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0= -google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY= -google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8= -google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 h1:rIo7ocm2roD9DcFIX67Ym8icoGCKSARAiPljFhh5suQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2/go.mod h1:O1cOfN1Cy6QEYr7VxtjOyP5AdAuR0aJ/MYZaaof623Y= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 h1:NnYq6UN9ReLM9/Y01KWNOWyI5xQ9kbIms5GGJVwS/Yc= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= +google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= diff --git a/h2mux/activestreammap.go b/h2mux/activestreammap.go 
deleted file mode 100644 index 02386db3..00000000 --- a/h2mux/activestreammap.go +++ /dev/null @@ -1,195 +0,0 @@ -package h2mux - -import ( - "sync" - - "github.com/prometheus/client_golang/prometheus" - "golang.org/x/net/http2" -) - -var ( - ActiveStreams = prometheus.NewGauge(prometheus.GaugeOpts{ - Namespace: "cloudflared", - Subsystem: "tunnel", - Name: "active_streams", - Help: "Number of active streams created by all muxers.", - }) -) - -func init() { - prometheus.MustRegister(ActiveStreams) -} - -// activeStreamMap is used to moderate access to active streams between the read and write -// threads, and deny access to new peer streams while shutting down. -type activeStreamMap struct { - sync.RWMutex - // streams tracks open streams. - streams map[uint32]*MuxedStream - // nextStreamID is the next ID to use on our side of the connection. - // This is odd for clients, even for servers. - nextStreamID uint32 - // maxPeerStreamID is the ID of the most recent stream opened by the peer. - maxPeerStreamID uint32 - // activeStreams is a gauge shared by all muxers of this process to expose the total number of active streams - activeStreams prometheus.Gauge - - // ignoreNewStreams is true when the connection is being shut down. New streams - // cannot be registered. - ignoreNewStreams bool - // streamsEmpty is a chan that will be closed when no more streams are open. - streamsEmptyChan chan struct{} - closeOnce sync.Once -} - -func newActiveStreamMap(useClientStreamNumbers bool, activeStreams prometheus.Gauge) *activeStreamMap { - m := &activeStreamMap{ - streams: make(map[uint32]*MuxedStream), - streamsEmptyChan: make(chan struct{}), - nextStreamID: 1, - activeStreams: activeStreams, - } - // Client initiated stream uses odd stream ID, server initiated stream uses even stream ID - if !useClientStreamNumbers { - m.nextStreamID = 2 - } - return m -} - -// This function should be called while `m` is locked. 
-func (m *activeStreamMap) notifyStreamsEmpty() { - m.closeOnce.Do(func() { - close(m.streamsEmptyChan) - }) -} - -// Len returns the number of active streams. -func (m *activeStreamMap) Len() int { - m.RLock() - defer m.RUnlock() - return len(m.streams) -} - -func (m *activeStreamMap) Get(streamID uint32) (*MuxedStream, bool) { - m.RLock() - defer m.RUnlock() - stream, ok := m.streams[streamID] - return stream, ok -} - -// Set returns true if the stream was assigned successfully. If a stream -// already existed with that ID or we are shutting down, return false. -func (m *activeStreamMap) Set(newStream *MuxedStream) bool { - m.Lock() - defer m.Unlock() - if _, ok := m.streams[newStream.streamID]; ok { - return false - } - if m.ignoreNewStreams { - return false - } - m.streams[newStream.streamID] = newStream - m.activeStreams.Inc() - return true -} - -// Delete stops tracking the stream. It should be called only after it is closed and reset. -func (m *activeStreamMap) Delete(streamID uint32) { - m.Lock() - defer m.Unlock() - if _, ok := m.streams[streamID]; ok { - delete(m.streams, streamID) - m.activeStreams.Dec() - } - - // shutting down, and now the map is empty - if m.ignoreNewStreams && len(m.streams) == 0 { - m.notifyStreamsEmpty() - } -} - -// Shutdown blocks new streams from being created. -// It returns `done`, a channel that is closed once the last stream has closed -// and `progress`, whether a shutdown was already in progress -func (m *activeStreamMap) Shutdown() (done <-chan struct{}, alreadyInProgress bool) { - m.Lock() - defer m.Unlock() - if m.ignoreNewStreams { - // already shutting down - return m.streamsEmptyChan, true - } - m.ignoreNewStreams = true - if len(m.streams) == 0 { - // there are no streams to wait for - m.notifyStreamsEmpty() - } - return m.streamsEmptyChan, false -} - -// AcquireLocalID acquires a new stream ID for a stream you're opening. 
-func (m *activeStreamMap) AcquireLocalID() uint32 { - m.Lock() - defer m.Unlock() - x := m.nextStreamID - m.nextStreamID += 2 - return x -} - -// ObservePeerID observes the ID of a stream opened by the peer. It returns true if we should accept -// the new stream, or false to reject it. The ErrCode gives the reason why. -func (m *activeStreamMap) AcquirePeerID(streamID uint32) (bool, http2.ErrCode) { - m.Lock() - defer m.Unlock() - switch { - case m.ignoreNewStreams: - return false, http2.ErrCodeStreamClosed - case streamID > m.maxPeerStreamID: - m.maxPeerStreamID = streamID - return true, http2.ErrCodeNo - default: - return false, http2.ErrCodeStreamClosed - } -} - -// IsPeerStreamID is true if the stream ID belongs to the peer. -func (m *activeStreamMap) IsPeerStreamID(streamID uint32) bool { - m.RLock() - defer m.RUnlock() - return (streamID % 2) != (m.nextStreamID % 2) -} - -// IsLocalStreamID is true if it is a stream we have opened, even if it is now closed. -func (m *activeStreamMap) IsLocalStreamID(streamID uint32) bool { - m.RLock() - defer m.RUnlock() - return (streamID%2) == (m.nextStreamID%2) && streamID < m.nextStreamID -} - -// LastPeerStreamID returns the most recently opened peer stream ID. -func (m *activeStreamMap) LastPeerStreamID() uint32 { - m.RLock() - defer m.RUnlock() - return m.maxPeerStreamID -} - -// LastLocalStreamID returns the most recently opened local stream ID. -func (m *activeStreamMap) LastLocalStreamID() uint32 { - m.RLock() - defer m.RUnlock() - if m.nextStreamID > 1 { - return m.nextStreamID - 2 - } - return 0 -} - -// Abort closes every active stream and prevents new ones being created. This should be used to -// return errors in pending read/writes when the underlying connection goes away. 
-func (m *activeStreamMap) Abort() { - m.Lock() - defer m.Unlock() - for _, stream := range m.streams { - stream.Close() - } - m.ignoreNewStreams = true - m.notifyStreamsEmpty() -} diff --git a/h2mux/activestreammap_test.go b/h2mux/activestreammap_test.go deleted file mode 100644 index 0395b79b..00000000 --- a/h2mux/activestreammap_test.go +++ /dev/null @@ -1,195 +0,0 @@ -package h2mux - -import ( - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestShutdown(t *testing.T) { - const numStreams = 1000 - m := newActiveStreamMap(true, ActiveStreams) - - // Add all the streams - { - var wg sync.WaitGroup - wg.Add(numStreams) - for i := 0; i < numStreams; i++ { - go func(streamID int) { - defer wg.Done() - stream := &MuxedStream{streamID: uint32(streamID)} - ok := m.Set(stream) - assert.True(t, ok) - }(i) - } - wg.Wait() - } - assert.Equal(t, numStreams, m.Len(), "All the streams should have been added") - - shutdownChan, alreadyInProgress := m.Shutdown() - select { - case <-shutdownChan: - assert.Fail(t, "before Shutdown(), shutdownChan shouldn't be closed") - default: - } - assert.False(t, alreadyInProgress) - - shutdownChan2, alreadyInProgress2 := m.Shutdown() - assert.Equal(t, shutdownChan, shutdownChan2, "repeated calls to Shutdown() should return the same channel") - assert.True(t, alreadyInProgress2, "repeated calls to Shutdown() should return true for 'in progress'") - - // Delete all the streams - { - var wg sync.WaitGroup - wg.Add(numStreams) - for i := 0; i < numStreams; i++ { - go func(streamID int) { - defer wg.Done() - m.Delete(uint32(streamID)) - }(i) - } - wg.Wait() - } - assert.Equal(t, 0, m.Len(), "All the streams should have been deleted") - - select { - case <-shutdownChan: - default: - assert.Fail(t, "After all the streams are deleted, shutdownChan should have been closed") - } -} - -func TestEmptyBeforeShutdown(t *testing.T) { - const numStreams = 1000 - m := newActiveStreamMap(true, ActiveStreams) - - // Add all the streams 
- { - var wg sync.WaitGroup - wg.Add(numStreams) - for i := 0; i < numStreams; i++ { - go func(streamID int) { - defer wg.Done() - stream := &MuxedStream{streamID: uint32(streamID)} - ok := m.Set(stream) - assert.True(t, ok) - }(i) - } - wg.Wait() - } - assert.Equal(t, numStreams, m.Len(), "All the streams should have been added") - - // Delete all the streams, bringing m to size 0 - { - var wg sync.WaitGroup - wg.Add(numStreams) - for i := 0; i < numStreams; i++ { - go func(streamID int) { - defer wg.Done() - m.Delete(uint32(streamID)) - }(i) - } - wg.Wait() - } - assert.Equal(t, 0, m.Len(), "All the streams should have been deleted") - - // Add one stream back - const soloStreamID = uint32(0) - ok := m.Set(&MuxedStream{streamID: soloStreamID}) - assert.True(t, ok) - - shutdownChan, alreadyInProgress := m.Shutdown() - select { - case <-shutdownChan: - assert.Fail(t, "before Shutdown(), shutdownChan shouldn't be closed") - default: - } - assert.False(t, alreadyInProgress) - - shutdownChan2, alreadyInProgress2 := m.Shutdown() - assert.Equal(t, shutdownChan, shutdownChan2, "repeated calls to Shutdown() should return the same channel") - assert.True(t, alreadyInProgress2, "repeated calls to Shutdown() should return true for 'in progress'") - - // Remove the remaining stream - m.Delete(soloStreamID) - - select { - case <-shutdownChan: - default: - assert.Fail(t, "After all the streams are deleted, shutdownChan should have been closed") - } -} - -type noopBuffer struct { - isClosed bool -} - -func (t *noopBuffer) Read(p []byte) (n int, err error) { return len(p), nil } -func (t *noopBuffer) Write(p []byte) (n int, err error) { return len(p), nil } -func (t *noopBuffer) Reset() {} -func (t *noopBuffer) Len() int { return 0 } -func (t *noopBuffer) Close() error { t.isClosed = true; return nil } -func (t *noopBuffer) Closed() bool { return t.isClosed } - -type noopReadyList struct{} - -func (_ *noopReadyList) Signal(streamID uint32) {} - -func TestAbort(t *testing.T) { - 
const numStreams = 1000 - m := newActiveStreamMap(true, ActiveStreams) - - var openedStreams sync.Map - - // Add all the streams - { - var wg sync.WaitGroup - wg.Add(numStreams) - for i := 0; i < numStreams; i++ { - go func(streamID int) { - defer wg.Done() - stream := &MuxedStream{ - streamID: uint32(streamID), - readBuffer: &noopBuffer{}, - writeBuffer: &noopBuffer{}, - readyList: &noopReadyList{}, - } - ok := m.Set(stream) - assert.True(t, ok) - - openedStreams.Store(stream.streamID, stream) - }(i) - } - wg.Wait() - } - assert.Equal(t, numStreams, m.Len(), "All the streams should have been added") - - shutdownChan, alreadyInProgress := m.Shutdown() - select { - case <-shutdownChan: - assert.Fail(t, "before Abort(), shutdownChan shouldn't be closed") - default: - } - assert.False(t, alreadyInProgress) - - m.Abort() - assert.Equal(t, numStreams, m.Len(), "Abort() shouldn't delete any streams") - openedStreams.Range(func(key interface{}, value interface{}) bool { - stream := value.(*MuxedStream) - readBuffer := stream.readBuffer.(*noopBuffer) - writeBuffer := stream.writeBuffer.(*noopBuffer) - return assert.True(t, readBuffer.isClosed && writeBuffer.isClosed, "Abort() should have closed all the streams") - }) - - select { - case <-shutdownChan: - default: - assert.Fail(t, "after Abort(), shutdownChan should have been closed") - } - - // multiple aborts shouldn't cause any issues - m.Abort() - m.Abort() - m.Abort() -} diff --git a/h2mux/bytes_counter.go b/h2mux/bytes_counter.go deleted file mode 100644 index 7260f8bb..00000000 --- a/h2mux/bytes_counter.go +++ /dev/null @@ -1,27 +0,0 @@ -package h2mux - -import ( - "sync/atomic" -) - -type AtomicCounter struct { - count uint64 -} - -func NewAtomicCounter(initCount uint64) *AtomicCounter { - return &AtomicCounter{count: initCount} -} - -func (c *AtomicCounter) IncrementBy(number uint64) { - atomic.AddUint64(&c.count, number) -} - -// Count returns the current value of counter and reset it to 0 -func (c *AtomicCounter) 
Count() uint64 { - return atomic.SwapUint64(&c.count, 0) -} - -// Value returns the current value of counter -func (c *AtomicCounter) Value() uint64 { - return atomic.LoadUint64(&c.count) -} diff --git a/h2mux/bytes_counter_test.go b/h2mux/bytes_counter_test.go deleted file mode 100644 index da579aaf..00000000 --- a/h2mux/bytes_counter_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package h2mux - -import ( - "sync" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestCounter(t *testing.T) { - var wg sync.WaitGroup - wg.Add(dataPoints) - c := AtomicCounter{} - for i := 0; i < dataPoints; i++ { - go func() { - defer wg.Done() - c.IncrementBy(uint64(1)) - }() - } - wg.Wait() - assert.Equal(t, uint64(dataPoints), c.Count()) - assert.Equal(t, uint64(0), c.Count()) -} diff --git a/h2mux/error.go b/h2mux/error.go deleted file mode 100644 index 923eb335..00000000 --- a/h2mux/error.go +++ /dev/null @@ -1,66 +0,0 @@ -package h2mux - -import ( - "fmt" - - "golang.org/x/net/http2" -) - -var ( - // HTTP2 error codes: https://http2.github.io/http2-spec/#ErrorCodes - ErrHandshakeTimeout = MuxerHandshakeError{"1000 handshake timeout"} - ErrBadHandshakeNotSettings = MuxerHandshakeError{"1001 unexpected response"} - ErrBadHandshakeUnexpectedAck = MuxerHandshakeError{"1002 unexpected response"} - ErrBadHandshakeNoMagic = MuxerHandshakeError{"1003 unexpected response"} - ErrBadHandshakeWrongMagic = MuxerHandshakeError{"1004 connected to endpoint of wrong type"} - ErrBadHandshakeNotSettingsAck = MuxerHandshakeError{"1005 unexpected response"} - ErrBadHandshakeUnexpectedSettings = MuxerHandshakeError{"1006 unexpected response"} - - ErrUnexpectedFrameType = MuxerProtocolError{"2001 unexpected frame type", http2.ErrCodeProtocol} - ErrUnknownStream = MuxerProtocolError{"2002 unknown stream", http2.ErrCodeProtocol} - ErrInvalidStream = MuxerProtocolError{"2003 invalid stream", http2.ErrCodeProtocol} - ErrNotRPCStream = MuxerProtocolError{"2004 not RPC stream", 
http2.ErrCodeProtocol} - - ErrStreamHeadersSent = MuxerApplicationError{"3000 headers already sent"} - ErrStreamRequestConnectionClosed = MuxerApplicationError{"3001 connection closed while opening stream"} - ErrConnectionDropped = MuxerApplicationError{"3002 connection dropped"} - ErrStreamRequestTimeout = MuxerApplicationError{"3003 open stream timeout"} - ErrResponseHeadersTimeout = MuxerApplicationError{"3004 timeout waiting for initial response headers"} - ErrResponseHeadersConnectionClosed = MuxerApplicationError{"3005 connection closed while waiting for initial response headers"} - - ErrClosedStream = MuxerStreamError{"4000 stream closed", http2.ErrCodeStreamClosed} -) - -type MuxerHandshakeError struct { - cause string -} - -func (e MuxerHandshakeError) Error() string { - return fmt.Sprintf("Handshake error: %s", e.cause) -} - -type MuxerProtocolError struct { - cause string - h2code http2.ErrCode -} - -func (e MuxerProtocolError) Error() string { - return fmt.Sprintf("Protocol error: %s", e.cause) -} - -type MuxerApplicationError struct { - cause string -} - -func (e MuxerApplicationError) Error() string { - return fmt.Sprintf("Application error: %s", e.cause) -} - -type MuxerStreamError struct { - cause string - h2code http2.ErrCode -} - -func (e MuxerStreamError) Error() string { - return fmt.Sprintf("Stream error: %s", e.cause) -} diff --git a/h2mux/h2_compressor.go b/h2mux/h2_compressor.go deleted file mode 100644 index 7d609305..00000000 --- a/h2mux/h2_compressor.go +++ /dev/null @@ -1,17 +0,0 @@ -package h2mux - -import ( - "io" -) - -func CompressionIsSupported() bool { - return false -} - -func newDecompressor(src io.Reader) decompressor { - return nil -} - -func newCompressor(dst io.Writer, quality, lgwin int) compressor { - return nil -} diff --git a/h2mux/h2_dictionaries.go b/h2mux/h2_dictionaries.go deleted file mode 100644 index 5d11bee7..00000000 --- a/h2mux/h2_dictionaries.go +++ /dev/null @@ -1,596 +0,0 @@ -package h2mux - -import ( - 
"bytes" - "io" - "strings" - "sync" - - "golang.org/x/net/http2" -) - -/* This is an implementation of https://github.com/vkrasnov/h2-compression-dictionaries -but modified for tunnels in a few key ways: -Since tunnels is a server-to-server service, some aspects of the spec would cause -unnecessary head-of-line blocking on the CPU and on the network, hence this implementation -allows for parallel compression on the "client", and buffering on the "server" to solve -this problem. */ - -// Assign temporary values -const SettingCompression http2.SettingID = 0xff20 - -const ( - FrameSetCompressionContext http2.FrameType = 0xf0 - FrameUseDictionary http2.FrameType = 0xf1 - FrameSetDictionary http2.FrameType = 0xf2 -) - -const ( - FlagSetDictionaryAppend http2.Flags = 0x1 - FlagSetDictionaryOffset http2.Flags = 0x2 -) - -const compressionVersion = uint8(1) -const compressionFormat = uint8(2) - -type CompressionSetting uint - -const ( - CompressionNone CompressionSetting = iota - CompressionLow - CompressionMedium - CompressionMax -) - -type CompressionPreset struct { - nDicts, dictSize, quality uint8 -} - -type compressor interface { - Write([]byte) (int, error) - Flush() error - SetDictionary([]byte) - Close() error -} - -type decompressor interface { - Read([]byte) (int, error) - SetDictionary([]byte) - Close() error -} - -var compressionPresets = map[CompressionSetting]CompressionPreset{ - CompressionNone: {0, 0, 0}, - CompressionLow: {32, 17, 5}, - CompressionMedium: {64, 18, 6}, - CompressionMax: {255, 19, 9}, -} - -func compressionSettingVal(version, fmt, sz, nd uint8) uint32 { - // Currently the compression settings are include: - // * version: only 1 is supported - // * fmt: only 2 for brotli is supported - // * sz: log2 of the maximal allowed dictionary size - // * nd: max allowed number of dictionaries - return uint32(version)<<24 + uint32(fmt)<<16 + uint32(sz)<<8 + uint32(nd) -} - -func parseCompressionSettingVal(setting uint32) (version, fmt, sz, nd uint8) { - 
version = uint8(setting >> 24) - fmt = uint8(setting >> 16) - sz = uint8(setting >> 8) - nd = uint8(setting) - return -} - -func (c CompressionSetting) toH2Setting() uint32 { - p, ok := compressionPresets[c] - if !ok { - return 0 - } - return compressionSettingVal(compressionVersion, compressionFormat, p.dictSize, p.nDicts) -} - -func (c CompressionSetting) getPreset() CompressionPreset { - return compressionPresets[c] -} - -type dictUpdate struct { - reader *h2DictionaryReader - dictionary *h2ReadDictionary - buff []byte - isReady bool - isUse bool - s setDictRequest -} - -type h2ReadDictionary struct { - dictionary []byte - queue []*dictUpdate - maxSize int -} - -type h2ReadDictionaries struct { - d []h2ReadDictionary - maxSize int -} - -type h2DictionaryReader struct { - *SharedBuffer // Propagate the decompressed output into the original buffer - decompBuffer *bytes.Buffer // Intermediate buffer for the brotli compressor - dictionary []byte // The content of the dictionary being used by this reader - internalBuffer []byte - s, e int // Start and end of the buffer - decomp decompressor // The brotli compressor - isClosed bool // Indicates that Close was called for this reader - queue []*dictUpdate // List of dictionaries to update, when the data is available -} - -type h2WriteDictionary []byte - -type setDictRequest struct { - streamID uint32 - dictID uint8 - dictSZ uint64 - truncate, offset uint64 - P, E, D bool -} - -type useDictRequest struct { - dictID uint8 - streamID uint32 - setDict []setDictRequest -} - -type h2WriteDictionaries struct { - dictLock sync.Mutex - dictChan chan useDictRequest - dictionaries []h2WriteDictionary - nextAvail int // next unused dictionary slot - maxAvail int // max ID, defined by SETTINGS - maxSize int // max size, defined by SETTINGS - typeToDict map[string]uint8 // map from content type to dictionary that encodes it - pathToDict map[string]uint8 // map from path to dictionary that encodes it - quality int - window int - 
compIn, compOut *AtomicCounter -} - -type h2DictWriter struct { - *bytes.Buffer - comp compressor - dicts *h2WriteDictionaries - writerLock sync.Mutex - - streamID uint32 - path string - contentType string -} - -type h2Dictionaries struct { - write *h2WriteDictionaries - read *h2ReadDictionaries -} - -func (o *dictUpdate) update(buff []byte) { - o.buff = make([]byte, len(buff)) - copy(o.buff, buff) - o.isReady = true -} - -func (d *h2ReadDictionary) update() { - for len(d.queue) > 0 { - o := d.queue[0] - if !o.isReady { - break - } - if o.isUse { - reader := o.reader - reader.dictionary = make([]byte, len(d.dictionary)) - copy(reader.dictionary, d.dictionary) - reader.decomp = newDecompressor(reader.decompBuffer) - if len(reader.dictionary) > 0 { - reader.decomp.SetDictionary(reader.dictionary) - } - reader.Write([]byte{}) - } else { - d.dictionary = adjustDictionary(d.dictionary, o.buff, o.s, d.maxSize) - } - d.queue = d.queue[1:] - } -} - -func newH2ReadDictionaries(nd, sz uint8) h2ReadDictionaries { - d := make([]h2ReadDictionary, int(nd)) - for i := range d { - d[i].maxSize = 1 << uint(sz) - } - return h2ReadDictionaries{d: d, maxSize: 1 << uint(sz)} -} - -func (dicts *h2ReadDictionaries) getDictByID(dictID uint8) (*h2ReadDictionary, error) { - if int(dictID) > len(dicts.d) { - return nil, MuxerStreamError{"dictID too big", http2.ErrCodeProtocol} - } - - return &dicts.d[dictID], nil -} - -func (dicts *h2ReadDictionaries) newReader(b *SharedBuffer, dictID uint8) *h2DictionaryReader { - if int(dictID) > len(dicts.d) { - return nil - } - - dictionary := &dicts.d[dictID] - reader := &h2DictionaryReader{SharedBuffer: b, decompBuffer: &bytes.Buffer{}, internalBuffer: make([]byte, dicts.maxSize)} - - if len(dictionary.queue) == 0 { - reader.dictionary = make([]byte, len(dictionary.dictionary)) - copy(reader.dictionary, dictionary.dictionary) - reader.decomp = newDecompressor(reader.decompBuffer) - if len(reader.dictionary) > 0 { - 
reader.decomp.SetDictionary(reader.dictionary) - } - } else { - dictionary.queue = append(dictionary.queue, &dictUpdate{isUse: true, isReady: true, reader: reader}) - } - return reader -} - -func (r *h2DictionaryReader) updateWaitingDictionaries() { - // Update all the waiting dictionaries - for _, o := range r.queue { - if o.isReady { - continue - } - if r.isClosed || uint64(r.e) >= o.s.dictSZ { - o.update(r.internalBuffer[:r.e]) - if o == o.dictionary.queue[0] { - defer o.dictionary.update() - } - } - } -} - -// Write actually happens when reading from network, this is therefore the stage where we decompress the buffer -func (r *h2DictionaryReader) Write(p []byte) (n int, err error) { - // Every write goes into brotli buffer first - n, err = r.decompBuffer.Write(p) - if err != nil { - return - } - - if r.decomp == nil { - return - } - - for { - m, err := r.decomp.Read(r.internalBuffer[r.e:]) - if err != nil && err != io.EOF { - r.SharedBuffer.Close() - r.decomp.Close() - return n, err - } - - r.SharedBuffer.Write(r.internalBuffer[r.e : r.e+m]) - r.e += m - - if m == 0 { - break - } - - if r.e == len(r.internalBuffer) { - r.updateWaitingDictionaries() - r.e = 0 - } - } - - r.updateWaitingDictionaries() - - if r.isClosed { - r.SharedBuffer.Close() - r.decomp.Close() - } - - return -} - -func (r *h2DictionaryReader) Close() error { - if r.isClosed { - return nil - } - r.isClosed = true - r.Write([]byte{}) - return nil -} - -var compressibleTypes = map[string]bool{ - "application/atom+xml": true, - "application/javascript": true, - "application/json": true, - "application/ld+json": true, - "application/manifest+json": true, - "application/rss+xml": true, - "application/vnd.geo+json": true, - "application/vnd.ms-fontobject": true, - "application/x-font-ttf": true, - "application/x-yaml": true, - "application/x-web-app-manifest+json": true, - "application/xhtml+xml": true, - "application/xml": true, - "font/opentype": true, - "image/bmp": true, - "image/svg+xml": true, 
- "image/x-icon": true, - "text/cache-manifest": true, - "text/css": true, - "text/html": true, - "text/plain": true, - "text/vcard": true, - "text/vnd.rim.location.xloc": true, - "text/vtt": true, - "text/x-component": true, - "text/x-cross-domain-policy": true, - "text/x-yaml": true, -} - -func getContentType(headers []Header) string { - for _, h := range headers { - if strings.ToLower(h.Name) == "content-type" { - val := strings.ToLower(h.Value) - sep := strings.IndexRune(val, ';') - if sep != -1 { - return val[:sep] - } - return val - } - } - - return "" -} - -func newH2WriteDictionaries(nd, sz, quality uint8, compIn, compOut *AtomicCounter) (*h2WriteDictionaries, chan useDictRequest) { - useDictChan := make(chan useDictRequest) - return &h2WriteDictionaries{ - dictionaries: make([]h2WriteDictionary, nd), - nextAvail: 0, - maxAvail: int(nd), - maxSize: 1 << uint(sz), - dictChan: useDictChan, - typeToDict: make(map[string]uint8), - pathToDict: make(map[string]uint8), - quality: int(quality), - window: 1 << uint(sz+1), - compIn: compIn, - compOut: compOut, - }, useDictChan -} - -func adjustDictionary(currentDictionary, newData []byte, set setDictRequest, maxSize int) []byte { - currentDictionary = append(currentDictionary, newData[:set.dictSZ]...) 
- - if len(currentDictionary) > maxSize { - currentDictionary = currentDictionary[len(currentDictionary)-maxSize:] - } - - return currentDictionary -} - -func (h2d *h2WriteDictionaries) getNextDictID() (dictID uint8, ok bool) { - if h2d.nextAvail < h2d.maxAvail { - dictID, ok = uint8(h2d.nextAvail), true - h2d.nextAvail++ - return - } - - return 0, false -} - -func (h2d *h2WriteDictionaries) getGenericDictID() (dictID uint8, ok bool) { - if h2d.maxAvail == 0 { - return 0, false - } - return uint8(h2d.maxAvail - 1), true -} - -func (h2d *h2WriteDictionaries) getDictWriter(s *MuxedStream, headers []Header) *h2DictWriter { - w := s.writeBuffer - - if w == nil { - return nil - } - - if s.method != "GET" && s.method != "POST" { - return nil - } - - s.contentType = getContentType(headers) - if _, ok := compressibleTypes[s.contentType]; !ok && !strings.HasPrefix(s.contentType, "text") { - return nil - } - - return &h2DictWriter{ - Buffer: w.(*bytes.Buffer), - path: s.path, - contentType: s.contentType, - streamID: s.streamID, - dicts: h2d, - } -} - -func assignDictToStream(s *MuxedStream, p []byte) bool { - - // On first write to stream: - // * assign the right dictionary - // * update relevant dictionaries - // * send the required USE_DICT and SET_DICT frames - - h2d := s.dictionaries.write - if h2d == nil { - return false - } - - w, ok := s.writeBuffer.(*h2DictWriter) - if !ok || w.comp != nil { - return false - } - - h2d.dictLock.Lock() - - if w.comp != nil { - // Check again with lock, in therory the interface allows for unordered writes - h2d.dictLock.Unlock() - return false - } - - // The logic of dictionary generation is below - - // Is there a dictionary for the exact path or content-type? 
- var useID uint8 - pathID, pathFound := h2d.pathToDict[w.path] - typeID, typeFound := h2d.typeToDict[w.contentType] - - if pathFound { - // Use dictionary for path as top priority - useID = pathID - if !typeFound { // Shouldn't really happen, unless type changes between requests - typeID, typeFound = h2d.getNextDictID() - if typeFound { - h2d.typeToDict[w.contentType] = typeID - } - } - } else if typeFound { - // Use dictionary for same content type as second priority - useID = typeID - pathID, pathFound = h2d.getNextDictID() - if pathFound { // If a slot is available, generate new dictionary for path - h2d.pathToDict[w.path] = pathID - } - } else { - // Use the overflow dictionary as last resort - // If slots are available generate new dictionaries for path and content-type - useID, _ = h2d.getGenericDictID() - pathID, pathFound = h2d.getNextDictID() - if pathFound { - h2d.pathToDict[w.path] = pathID - } - typeID, typeFound = h2d.getNextDictID() - if typeFound { - h2d.typeToDict[w.contentType] = typeID - } - } - - useLen := h2d.maxSize - if len(p) < useLen { - useLen = len(p) - } - - // Update all the dictionaries using the new data - setDicts := make([]setDictRequest, 0, 3) - setDict := setDictRequest{ - streamID: w.streamID, - dictID: useID, - dictSZ: uint64(useLen), - } - setDicts = append(setDicts, setDict) - if pathID != useID { - setDict.dictID = pathID - setDicts = append(setDicts, setDict) - } - if typeID != useID { - setDict.dictID = typeID - setDicts = append(setDicts, setDict) - } - - h2d.dictChan <- useDictRequest{streamID: w.streamID, dictID: uint8(useID), setDict: setDicts} - - dict := h2d.dictionaries[useID] - - // Brolti requires the dictionary to be immutable - copyDict := make([]byte, len(dict)) - copy(copyDict, dict) - - for _, set := range setDicts { - h2d.dictionaries[set.dictID] = adjustDictionary(h2d.dictionaries[set.dictID], p, set, h2d.maxSize) - } - - w.comp = newCompressor(w.Buffer, h2d.quality, h2d.window) - - s.writeLock.Lock() - 
h2d.dictLock.Unlock() - - if len(copyDict) > 0 { - w.comp.SetDictionary(copyDict) - } - - return true -} - -func (w *h2DictWriter) Write(p []byte) (n int, err error) { - bufLen := w.Buffer.Len() - if w.comp != nil { - n, err = w.comp.Write(p) - if err != nil { - return - } - err = w.comp.Flush() - w.dicts.compIn.IncrementBy(uint64(n)) - w.dicts.compOut.IncrementBy(uint64(w.Buffer.Len() - bufLen)) - return - } - return w.Buffer.Write(p) -} - -func (w *h2DictWriter) Close() error { - if w.comp != nil { - return w.comp.Close() - } - return nil -} - -// From http2/hpack -func http2ReadVarInt(n byte, p []byte) (remain []byte, v uint64, err error) { - if n < 1 || n > 8 { - panic("bad n") - } - if len(p) == 0 { - return nil, 0, MuxerStreamError{"unexpected EOF", http2.ErrCodeProtocol} - } - v = uint64(p[0]) - if n < 8 { - v &= (1 << uint64(n)) - 1 - } - if v < (1< 0 { - b := p[0] - p = p[1:] - v += uint64(b&127) << m - if b&128 == 0 { - return p, v, nil - } - m += 7 - if m >= 63 { - return origP, 0, MuxerStreamError{"invalid integer", http2.ErrCodeProtocol} - } - } - return nil, 0, MuxerStreamError{"unexpected EOF", http2.ErrCodeProtocol} -} - -func appendVarInt(dst []byte, n byte, i uint64) []byte { - k := uint64((1 << n) - 1) - if i < k { - return append(dst, byte(i)) - } - dst = append(dst, byte(k)) - i -= k - for ; i >= 128; i >>= 7 { - dst = append(dst, byte(0x80|(i&0x7f))) - } - return append(dst, byte(i)) -} diff --git a/h2mux/h2mux.go b/h2mux/h2mux.go deleted file mode 100644 index c7c75f3b..00000000 --- a/h2mux/h2mux.go +++ /dev/null @@ -1,506 +0,0 @@ -package h2mux - -import ( - "context" - "io" - "strings" - "sync" - "time" - - "github.com/prometheus/client_golang/prometheus" - "github.com/rs/zerolog" - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" - "golang.org/x/sync/errgroup" -) - -const ( - defaultFrameSize uint32 = 1 << 14 // Minimum frame size in http2 spec - defaultWindowSize uint32 = (1 << 16) - 1 // Minimum window size in http2 spec - 
maxWindowSize uint32 = (1 << 31) - 1 // 2^31-1 = 2147483647, max window size in http2 spec - defaultTimeout time.Duration = 5 * time.Second - defaultRetries uint64 = 5 - defaultWriteBufferMaxLen int = 1024 * 1024 // 1mb - writeBufferInitialSize int = 16 * 1024 // 16KB - - SettingMuxerMagic http2.SettingID = 0x42db - MuxerMagicOrigin uint32 = 0xa2e43c8b - MuxerMagicEdge uint32 = 0x1088ebf9 -) - -type MuxedStreamHandler interface { - ServeStream(*MuxedStream) error -} - -type MuxedStreamFunc func(stream *MuxedStream) error - -func (f MuxedStreamFunc) ServeStream(stream *MuxedStream) error { - return f(stream) -} - -type MuxerConfig struct { - Timeout time.Duration - Handler MuxedStreamHandler - IsClient bool - // Name is used to identify this muxer instance when logging. - Name string - // The minimum time this connection can be idle before sending a heartbeat. - HeartbeatInterval time.Duration - // The minimum number of heartbeats to send before terminating the connection. - MaxHeartbeats uint64 - // Logger to use - Log *zerolog.Logger - CompressionQuality CompressionSetting - // Initial size for HTTP2 flow control windows - DefaultWindowSize uint32 - // Largest allowable size for HTTP2 flow control windows - MaxWindowSize uint32 - // Largest allowable capacity for the buffer of data to be sent - StreamWriteBufferMaxLen int -} - -type Muxer struct { - // f is used to read and write HTTP2 frames on the wire. - f *http2.Framer - // config is the MuxerConfig given in Handshake. - config MuxerConfig - // w, r are references to the underlying connection used. - w io.WriteCloser - r io.ReadCloser - // muxReader is the read process. - muxReader *MuxReader - // muxWriter is the write process. - muxWriter *MuxWriter - // muxMetricsUpdater is the process to update metrics - muxMetricsUpdater muxMetricsUpdater - // newStreamChan is used to create new streams on the writer thread. - // The writer will assign the next available stream ID. 
- newStreamChan chan MuxedStreamRequest - // abortChan is used to abort the writer event loop. - abortChan chan struct{} - // abortOnce is used to ensure abortChan is closed once only. - abortOnce sync.Once - // readyList is used to signal writable streams. - readyList *ReadyList - // streams tracks currently-open streams. - streams *activeStreamMap - // explicitShutdown records whether the Muxer is closing because Shutdown was called, or due to another - // error. - explicitShutdown *BooleanFuse - - compressionQuality CompressionPreset -} - -func RPCHeaders() []Header { - return []Header{ - {Name: ":method", Value: "RPC"}, - {Name: ":scheme", Value: "capnp"}, - {Name: ":path", Value: "*"}, - } -} - -// Handshake establishes a muxed connection with the peer. -// After the handshake completes, it is possible to open and accept streams. -func Handshake( - w io.WriteCloser, - r io.ReadCloser, - config MuxerConfig, - activeStreamsMetrics prometheus.Gauge, -) (*Muxer, error) { - // Set default config values - if config.Timeout == 0 { - config.Timeout = defaultTimeout - } - if config.DefaultWindowSize == 0 { - config.DefaultWindowSize = defaultWindowSize - } - if config.MaxWindowSize == 0 { - config.MaxWindowSize = maxWindowSize - } - if config.StreamWriteBufferMaxLen == 0 { - config.StreamWriteBufferMaxLen = defaultWriteBufferMaxLen - } - // Initialise connection state fields - m := &Muxer{ - f: http2.NewFramer(w, r), // A framer that writes to w and reads from r - config: config, - w: w, - r: r, - newStreamChan: make(chan MuxedStreamRequest), - abortChan: make(chan struct{}), - readyList: NewReadyList(), - streams: newActiveStreamMap(config.IsClient, activeStreamsMetrics), - } - - m.f.ReadMetaHeaders = hpack.NewDecoder(4096, func(hpack.HeaderField) {}) - // Initialise the settings to identify this connection and confirm the other end is sane. 
- handshakeSetting := http2.Setting{ID: SettingMuxerMagic, Val: MuxerMagicEdge} - compressionSetting := http2.Setting{ID: SettingCompression, Val: 0} - - expectedMagic := MuxerMagicOrigin - if config.IsClient { - handshakeSetting.Val = MuxerMagicOrigin - expectedMagic = MuxerMagicEdge - } - errChan := make(chan error, 2) - // Simultaneously send our settings and verify the peer's settings. - go func() { errChan <- m.f.WriteSettings(handshakeSetting, compressionSetting) }() - go func() { errChan <- m.readPeerSettings(expectedMagic) }() - err := joinErrorsWithTimeout(errChan, 2, config.Timeout, ErrHandshakeTimeout) - if err != nil { - return nil, err - } - // Confirm sanity by ACKing the frame and expecting an ACK for our frame. - // Not strictly necessary, but let's pretend to be H2-like. - go func() { errChan <- m.f.WriteSettingsAck() }() - go func() { errChan <- m.readPeerSettingsAck() }() - err = joinErrorsWithTimeout(errChan, 2, config.Timeout, ErrHandshakeTimeout) - if err != nil { - return nil, err - } - - // set up reader/writer pair ready for serve - streamErrors := NewStreamErrorMap() - goAwayChan := make(chan http2.ErrCode, 1) - inBoundCounter := NewAtomicCounter(0) - outBoundCounter := NewAtomicCounter(0) - pingTimestamp := NewPingTimestamp() - connActive := NewSignal() - idleDuration := config.HeartbeatInterval - // Sanity check to ensure idelDuration is sane - if idleDuration == 0 || idleDuration < defaultTimeout { - idleDuration = defaultTimeout - config.Log.Info().Msgf("muxer: Minimum idle time has been adjusted to %d", defaultTimeout) - } - maxRetries := config.MaxHeartbeats - if maxRetries == 0 { - maxRetries = defaultRetries - config.Log.Info().Msgf("muxer: Minimum number of unacked heartbeats to send before closing the connection has been adjusted to %d", maxRetries) - } - - compBytesBefore, compBytesAfter := NewAtomicCounter(0), NewAtomicCounter(0) - - m.muxMetricsUpdater = newMuxMetricsUpdater( - m.abortChan, - compBytesBefore, - compBytesAfter, 
- ) - - m.explicitShutdown = NewBooleanFuse() - m.muxReader = &MuxReader{ - f: m.f, - handler: m.config.Handler, - streams: m.streams, - readyList: m.readyList, - streamErrors: streamErrors, - goAwayChan: goAwayChan, - abortChan: m.abortChan, - pingTimestamp: pingTimestamp, - connActive: connActive, - initialStreamWindow: m.config.DefaultWindowSize, - streamWindowMax: m.config.MaxWindowSize, - streamWriteBufferMaxLen: m.config.StreamWriteBufferMaxLen, - r: m.r, - metricsUpdater: m.muxMetricsUpdater, - bytesRead: inBoundCounter, - } - m.muxWriter = &MuxWriter{ - f: m.f, - streams: m.streams, - streamErrors: streamErrors, - readyStreamChan: m.readyList.ReadyChannel(), - newStreamChan: m.newStreamChan, - goAwayChan: goAwayChan, - abortChan: m.abortChan, - pingTimestamp: pingTimestamp, - idleTimer: NewIdleTimer(idleDuration, maxRetries), - connActiveChan: connActive.WaitChannel(), - maxFrameSize: defaultFrameSize, - metricsUpdater: m.muxMetricsUpdater, - bytesWrote: outBoundCounter, - } - m.muxWriter.headerEncoder = hpack.NewEncoder(&m.muxWriter.headerBuffer) - - if m.compressionQuality.dictSize > 0 && m.compressionQuality.nDicts > 0 { - nd, sz := m.compressionQuality.nDicts, m.compressionQuality.dictSize - writeDicts, dictChan := newH2WriteDictionaries( - nd, - sz, - m.compressionQuality.quality, - compBytesBefore, - compBytesAfter, - ) - readDicts := newH2ReadDictionaries(nd, sz) - m.muxReader.dictionaries = h2Dictionaries{read: &readDicts, write: writeDicts} - m.muxWriter.useDictChan = dictChan - } - - return m, nil -} - -func (m *Muxer) readPeerSettings(magic uint32) error { - frame, err := m.f.ReadFrame() - if err != nil { - return err - } - settingsFrame, ok := frame.(*http2.SettingsFrame) - if !ok { - return ErrBadHandshakeNotSettings - } - if settingsFrame.Header().Flags != 0 { - return ErrBadHandshakeUnexpectedAck - } - peerMagic, ok := settingsFrame.Value(SettingMuxerMagic) - if !ok { - return ErrBadHandshakeNoMagic - } - if magic != peerMagic { - return 
ErrBadHandshakeWrongMagic - } - peerCompression, ok := settingsFrame.Value(SettingCompression) - if !ok { - m.compressionQuality = compressionPresets[CompressionNone] - return nil - } - ver, fmt, sz, nd := parseCompressionSettingVal(peerCompression) - if ver != compressionVersion || fmt != compressionFormat || sz == 0 || nd == 0 { - m.compressionQuality = compressionPresets[CompressionNone] - return nil - } - // Values used for compression are the minimum between the two peers - if sz < m.compressionQuality.dictSize { - m.compressionQuality.dictSize = sz - } - if nd < m.compressionQuality.nDicts { - m.compressionQuality.nDicts = nd - } - return nil -} - -func (m *Muxer) readPeerSettingsAck() error { - frame, err := m.f.ReadFrame() - if err != nil { - return err - } - settingsFrame, ok := frame.(*http2.SettingsFrame) - if !ok { - return ErrBadHandshakeNotSettingsAck - } - if settingsFrame.Header().Flags != http2.FlagSettingsAck { - return ErrBadHandshakeUnexpectedSettings - } - return nil -} - -func joinErrorsWithTimeout(errChan <-chan error, receiveCount int, timeout time.Duration, timeoutError error) error { - for i := 0; i < receiveCount; i++ { - select { - case err := <-errChan: - if err != nil { - return err - } - case <-time.After(timeout): - return timeoutError - } - } - return nil -} - -// Serve runs the event loops that comprise h2mux: -// - MuxReader.run() -// - MuxWriter.run() -// - muxMetricsUpdater.run() -// In the normal case, Shutdown() is called concurrently with Serve() to stop -// these loops. 
-func (m *Muxer) Serve(ctx context.Context) error { - errGroup, _ := errgroup.WithContext(ctx) - errGroup.Go(func() error { - ch := make(chan error) - go func() { - err := m.muxReader.run(m.config.Log) - m.explicitShutdown.Fuse(false) - m.r.Close() - m.abort() - // don't block if parent goroutine quit early - select { - case ch <- err: - default: - } - }() - select { - case err := <-ch: - return err - case <-ctx.Done(): - return ctx.Err() - } - }) - - errGroup.Go(func() error { - ch := make(chan error) - go func() { - err := m.muxWriter.run(m.config.Log) - m.explicitShutdown.Fuse(false) - m.w.Close() - m.abort() - // don't block if parent goroutine quit early - select { - case ch <- err: - default: - } - }() - select { - case err := <-ch: - return err - case <-ctx.Done(): - return ctx.Err() - } - }) - - errGroup.Go(func() error { - ch := make(chan error) - go func() { - err := m.muxMetricsUpdater.run(m.config.Log) - // don't block if parent goroutine quit early - select { - case ch <- err: - default: - } - }() - select { - case err := <-ch: - return err - case <-ctx.Done(): - return ctx.Err() - } - }) - - err := errGroup.Wait() - if isUnexpectedTunnelError(err, m.explicitShutdown.Value()) { - return err - } - return nil -} - -// Shutdown is called to initiate the "happy path" of muxer termination. -// It blocks new streams from being created. -// It returns a channel that is closed when the last stream has been closed. -func (m *Muxer) Shutdown() <-chan struct{} { - m.explicitShutdown.Fuse(true) - return m.muxReader.Shutdown() -} - -// IsUnexpectedTunnelError identifies errors that are expected when shutting down the h2mux tunnel. -// The set of expected errors change depending on whether we initiated shutdown or not. 
-func isUnexpectedTunnelError(err error, expectedShutdown bool) bool { - if err == nil { - return false - } - if !expectedShutdown { - return true - } - return !isConnectionClosedError(err) -} - -func isConnectionClosedError(err error) bool { - if err == io.EOF { - return true - } - if err == io.ErrClosedPipe { - return true - } - if err.Error() == "tls: use of closed connection" { - return true - } - if strings.HasSuffix(err.Error(), "use of closed network connection") { - return true - } - return false -} - -// OpenStream opens a new data stream with the given headers. -// Called by proxy server and tunnel -func (m *Muxer) OpenStream(ctx context.Context, headers []Header, body io.Reader) (*MuxedStream, error) { - stream := m.NewStream(headers) - if err := m.MakeMuxedStreamRequest(ctx, NewMuxedStreamRequest(stream, body)); err != nil { - return nil, err - } - if err := m.AwaitResponseHeaders(ctx, stream); err != nil { - return nil, err - } - return stream, nil -} - -func (m *Muxer) OpenRPCStream(ctx context.Context) (*MuxedStream, error) { - stream := m.NewStream(RPCHeaders()) - if err := m.MakeMuxedStreamRequest(ctx, NewMuxedStreamRequest(stream, nil)); err != nil { - stream.Close() - return nil, err - } - if err := m.AwaitResponseHeaders(ctx, stream); err != nil { - stream.Close() - return nil, err - } - if !IsRPCStreamResponse(stream) { - stream.Close() - return nil, ErrNotRPCStream - } - return stream, nil -} - -func (m *Muxer) NewStream(headers []Header) *MuxedStream { - return NewStream(m.config, headers, m.readyList, m.muxReader.dictionaries) -} - -func (m *Muxer) MakeMuxedStreamRequest(ctx context.Context, request MuxedStreamRequest) error { - select { - case <-ctx.Done(): - return ErrStreamRequestTimeout - case <-m.abortChan: - return ErrStreamRequestConnectionClosed - // Will be received by mux writer - case m.newStreamChan <- request: - return nil - } -} - -func (m *Muxer) CloseStreamRead(stream *MuxedStream) { - stream.CloseRead() - if 
stream.WriteClosed() { - m.streams.Delete(stream.streamID) - } -} - -func (m *Muxer) AwaitResponseHeaders(ctx context.Context, stream *MuxedStream) error { - select { - case <-ctx.Done(): - return ErrResponseHeadersTimeout - case <-m.abortChan: - return ErrResponseHeadersConnectionClosed - case <-stream.responseHeadersReceived: - return nil - } -} - -func (m *Muxer) Metrics() *MuxerMetrics { - return m.muxMetricsUpdater.metrics() -} - -func (m *Muxer) abort() { - m.abortOnce.Do(func() { - close(m.abortChan) - m.readyList.Close() - m.streams.Abort() - }) -} - -// Return how many retries/ticks since the connection was last marked active -func (m *Muxer) TimerRetries() uint64 { - return m.muxWriter.idleTimer.RetryCount() -} - -func IsRPCStreamResponse(stream *MuxedStream) bool { - headers := stream.Headers - return len(headers) == 1 && - headers[0].Name == ":status" && - headers[0].Value == "200" -} diff --git a/h2mux/h2mux_test.go b/h2mux/h2mux_test.go deleted file mode 100644 index de79068e..00000000 --- a/h2mux/h2mux_test.go +++ /dev/null @@ -1,909 +0,0 @@ -package h2mux - -import ( - "bytes" - "context" - "fmt" - "io" - "math/rand" - "net" - "os" - "strconv" - "strings" - "sync" - "testing" - "time" - - "github.com/pkg/errors" - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" - "golang.org/x/sync/errgroup" -) - -const ( - testOpenStreamTimeout = time.Millisecond * 5000 - testHandshakeTimeout = time.Millisecond * 1000 -) - -var log = zerolog.Nop() - -func TestMain(m *testing.M) { - if os.Getenv("VERBOSE") == "1" { - //TODO: set log level - } - os.Exit(m.Run()) -} - -type DefaultMuxerPair struct { - OriginMuxConfig MuxerConfig - OriginMux *Muxer - OriginConn net.Conn - EdgeMuxConfig MuxerConfig - EdgeMux *Muxer - EdgeConn net.Conn - doneC chan struct{} -} - -func NewDefaultMuxerPair(t assert.TestingT, testName string, f MuxedStreamFunc) *DefaultMuxerPair { - origin, edge := net.Pipe() - p := &DefaultMuxerPair{ - OriginMuxConfig: MuxerConfig{ - 
Timeout: testHandshakeTimeout, - Handler: f, - IsClient: true, - Name: "origin", - Log: &log, - DefaultWindowSize: (1 << 8) - 1, - MaxWindowSize: (1 << 15) - 1, - StreamWriteBufferMaxLen: 1024, - HeartbeatInterval: defaultTimeout, - MaxHeartbeats: defaultRetries, - }, - OriginConn: origin, - EdgeMuxConfig: MuxerConfig{ - Timeout: testHandshakeTimeout, - IsClient: false, - Name: "edge", - Log: &log, - DefaultWindowSize: (1 << 8) - 1, - MaxWindowSize: (1 << 15) - 1, - StreamWriteBufferMaxLen: 1024, - HeartbeatInterval: defaultTimeout, - MaxHeartbeats: defaultRetries, - }, - EdgeConn: edge, - doneC: make(chan struct{}), - } - assert.NoError(t, p.Handshake(testName)) - return p -} - -func NewCompressedMuxerPair(t assert.TestingT, testName string, quality CompressionSetting, f MuxedStreamFunc) *DefaultMuxerPair { - origin, edge := net.Pipe() - p := &DefaultMuxerPair{ - OriginMuxConfig: MuxerConfig{ - Timeout: time.Second, - Handler: f, - IsClient: true, - Name: "origin", - CompressionQuality: quality, - Log: &log, - HeartbeatInterval: defaultTimeout, - MaxHeartbeats: defaultRetries, - }, - OriginConn: origin, - EdgeMuxConfig: MuxerConfig{ - Timeout: time.Second, - IsClient: false, - Name: "edge", - CompressionQuality: quality, - Log: &log, - HeartbeatInterval: defaultTimeout, - MaxHeartbeats: defaultRetries, - }, - EdgeConn: edge, - doneC: make(chan struct{}), - } - assert.NoError(t, p.Handshake(testName)) - return p -} - -func (p *DefaultMuxerPair) Handshake(testName string) error { - ctx, cancel := context.WithTimeout(context.Background(), testHandshakeTimeout) - defer cancel() - errGroup, _ := errgroup.WithContext(ctx) - errGroup.Go(func() (err error) { - p.EdgeMux, err = Handshake(p.EdgeConn, p.EdgeConn, p.EdgeMuxConfig, ActiveStreams) - return errors.Wrap(err, "edge handshake failure") - }) - errGroup.Go(func() (err error) { - p.OriginMux, err = Handshake(p.OriginConn, p.OriginConn, p.OriginMuxConfig, ActiveStreams) - return errors.Wrap(err, "origin handshake 
failure") - }) - - return errGroup.Wait() -} - -func (p *DefaultMuxerPair) Serve(t assert.TestingT) { - ctx := context.Background() - var wg sync.WaitGroup - wg.Add(2) - go func() { - err := p.EdgeMux.Serve(ctx) - if err != nil && err != io.EOF && err != io.ErrClosedPipe { - t.Errorf("error in edge muxer Serve(): %s", err) - } - p.OriginMux.Shutdown() - wg.Done() - }() - go func() { - err := p.OriginMux.Serve(ctx) - if err != nil && err != io.EOF && err != io.ErrClosedPipe { - t.Errorf("error in origin muxer Serve(): %s", err) - } - p.EdgeMux.Shutdown() - wg.Done() - }() - go func() { - // notify when both muxes have stopped serving - wg.Wait() - close(p.doneC) - }() -} - -func (p *DefaultMuxerPair) Wait(t *testing.T) { - select { - case <-p.doneC: - return - case <-time.After(5 * time.Second): - t.Fatal("timeout waiting for shutdown") - } -} - -func (p *DefaultMuxerPair) OpenEdgeMuxStream(headers []Header, body io.Reader) (*MuxedStream, error) { - ctx, cancel := context.WithTimeout(context.Background(), testOpenStreamTimeout) - defer cancel() - return p.EdgeMux.OpenStream(ctx, headers, body) -} - -func TestHandshake(t *testing.T) { - f := func(stream *MuxedStream) error { - return nil - } - muxPair := NewDefaultMuxerPair(t, t.Name(), f) - AssertIfPipeReadable(t, muxPair.OriginConn) - AssertIfPipeReadable(t, muxPair.EdgeConn) -} - -func TestSingleStream(t *testing.T) { - f := MuxedStreamFunc(func(stream *MuxedStream) error { - if len(stream.Headers) != 1 { - t.Fatalf("expected %d headers, got %d", 1, len(stream.Headers)) - } - if stream.Headers[0].Name != "test-header" { - t.Fatalf("expected header name %s, got %s", "test-header", stream.Headers[0].Name) - } - if stream.Headers[0].Value != "headerValue" { - t.Fatalf("expected header value %s, got %s", "headerValue", stream.Headers[0].Value) - } - _ = stream.WriteHeaders([]Header{ - {Name: "response-header", Value: "responseValue"}, - }) - buf := []byte("Hello world") - _, _ = stream.Write(buf) - n, err := 
io.ReadFull(stream, buf) - if n > 0 { - t.Fatalf("read %d bytes after EOF", n) - } - if err != io.EOF { - t.Fatalf("expected EOF, got %s", err) - } - return nil - }) - muxPair := NewDefaultMuxerPair(t, t.Name(), f) - muxPair.Serve(t) - - stream, err := muxPair.OpenEdgeMuxStream( - []Header{{Name: "test-header", Value: "headerValue"}}, - nil, - ) - if err != nil { - t.Fatalf("error in OpenStream: %s", err) - } - if len(stream.Headers) != 1 { - t.Fatalf("expected %d headers, got %d", 1, len(stream.Headers)) - } - if stream.Headers[0].Name != "response-header" { - t.Fatalf("expected header name %s, got %s", "response-header", stream.Headers[0].Name) - } - if stream.Headers[0].Value != "responseValue" { - t.Fatalf("expected header value %s, got %s", "responseValue", stream.Headers[0].Value) - } - responseBody := make([]byte, 11) - n, err := io.ReadFull(stream, responseBody) - if err != nil { - t.Fatalf("error from (*MuxedStream).Read: %s", err) - } - if n != len(responseBody) { - t.Fatalf("expected response body to have %d bytes, got %d", len(responseBody), n) - } - if string(responseBody) != "Hello world" { - t.Fatalf("expected response body %s, got %s", "Hello world", responseBody) - } - _ = stream.Close() - n, err = stream.Write([]byte("aaaaa")) - if n > 0 { - t.Fatalf("wrote %d bytes after EOF", n) - } - if err != io.EOF { - t.Fatalf("expected EOF, got %s", err) - } -} - -func TestSingleStreamLargeResponseBody(t *testing.T) { - bodySize := 1 << 24 - f := MuxedStreamFunc(func(stream *MuxedStream) error { - if len(stream.Headers) != 1 { - t.Fatalf("expected %d headers, got %d", 1, len(stream.Headers)) - } - if stream.Headers[0].Name != "test-header" { - t.Fatalf("expected header name %s, got %s", "test-header", stream.Headers[0].Name) - } - if stream.Headers[0].Value != "headerValue" { - t.Fatalf("expected header value %s, got %s", "headerValue", stream.Headers[0].Value) - } - _ = stream.WriteHeaders([]Header{ - {Name: "response-header", Value: "responseValue"}, - }) 
- payload := make([]byte, bodySize) - for i := range payload { - payload[i] = byte(i % 256) - } - t.Log("Writing payload...") - n, err := stream.Write(payload) - t.Logf("Wrote %d bytes into the stream", n) - if err != nil { - t.Fatalf("origin write error: %s", err) - } - if n != len(payload) { - t.Fatalf("origin short write: %d/%d bytes", n, len(payload)) - } - - return nil - }) - muxPair := NewDefaultMuxerPair(t, t.Name(), f) - muxPair.Serve(t) - - stream, err := muxPair.OpenEdgeMuxStream( - []Header{{Name: "test-header", Value: "headerValue"}}, - nil, - ) - if err != nil { - t.Fatalf("error in OpenStream: %s", err) - } - if len(stream.Headers) != 1 { - t.Fatalf("expected %d headers, got %d", 1, len(stream.Headers)) - } - if stream.Headers[0].Name != "response-header" { - t.Fatalf("expected header name %s, got %s", "response-header", stream.Headers[0].Name) - } - if stream.Headers[0].Value != "responseValue" { - t.Fatalf("expected header value %s, got %s", "responseValue", stream.Headers[0].Value) - } - responseBody := make([]byte, bodySize) - - n, err := io.ReadFull(stream, responseBody) - if err != nil { - t.Fatalf("error from (*MuxedStream).Read: %s", err) - } - if n != len(responseBody) { - t.Fatalf("expected response body to have %d bytes, got %d", len(responseBody), n) - } -} - -func TestMultipleStreams(t *testing.T) { - f := MuxedStreamFunc(func(stream *MuxedStream) error { - if len(stream.Headers) != 1 { - t.Fatalf("expected %d headers, got %d", 1, len(stream.Headers)) - } - if stream.Headers[0].Name != "client-token" { - t.Fatalf("expected header name %s, got %s", "client-token", stream.Headers[0].Name) - } - log.Debug().Msgf("Got request for stream %s", stream.Headers[0].Value) - _ = stream.WriteHeaders([]Header{ - {Name: "response-token", Value: stream.Headers[0].Value}, - }) - log.Debug().Msgf("Wrote headers for stream %s", stream.Headers[0].Value) - _, _ = stream.Write([]byte("OK")) - log.Debug().Msgf("Wrote body for stream %s", 
stream.Headers[0].Value) - return nil - }) - muxPair := NewDefaultMuxerPair(t, t.Name(), f) - muxPair.Serve(t) - - maxStreams := 64 - errorsC := make(chan error, maxStreams) - var wg sync.WaitGroup - wg.Add(maxStreams) - for i := 0; i < maxStreams; i++ { - go func(tokenId int) { - defer wg.Done() - tokenString := fmt.Sprintf("%d", tokenId) - stream, err := muxPair.OpenEdgeMuxStream( - []Header{{Name: "client-token", Value: tokenString}}, - nil, - ) - log.Debug().Msgf("Got headers for stream %d", tokenId) - if err != nil { - errorsC <- err - return - } - if len(stream.Headers) != 1 { - errorsC <- fmt.Errorf("stream %d has error: expected %d headers, got %d", stream.streamID, 1, len(stream.Headers)) - return - } - if stream.Headers[0].Name != "response-token" { - errorsC <- fmt.Errorf("stream %d has error: expected header name %s, got %s", stream.streamID, "response-token", stream.Headers[0].Name) - return - } - if stream.Headers[0].Value != tokenString { - errorsC <- fmt.Errorf("stream %d has error: expected header value %s, got %s", stream.streamID, tokenString, stream.Headers[0].Value) - return - } - responseBody := make([]byte, 2) - n, err := io.ReadFull(stream, responseBody) - if err != nil { - errorsC <- fmt.Errorf("stream %d has error: error from (*MuxedStream).Read: %s", stream.streamID, err) - return - } - if n != len(responseBody) { - errorsC <- fmt.Errorf("stream %d has error: expected response body to have %d bytes, got %d", stream.streamID, len(responseBody), n) - return - } - if string(responseBody) != "OK" { - errorsC <- fmt.Errorf("stream %d has error: expected response body %s, got %s", stream.streamID, "OK", responseBody) - return - } - }(i) - } - wg.Wait() - close(errorsC) - testFail := false - for err := range errorsC { - testFail = true - log.Error().Msgf("%s", err) - } - if testFail { - t.Fatalf("TestMultipleStreams failed") - } -} - -func TestMultipleStreamsFlowControl(t *testing.T) { - maxStreams := 32 - responseSizes := make([]int32, 
maxStreams) - for i := 0; i < maxStreams; i++ { - responseSizes[i] = rand.Int31n(int32(defaultWindowSize << 4)) - } - - f := MuxedStreamFunc(func(stream *MuxedStream) error { - if len(stream.Headers) != 1 { - t.Fatalf("expected %d headers, got %d", 1, len(stream.Headers)) - } - if stream.Headers[0].Name != "test-header" { - t.Fatalf("expected header name %s, got %s", "test-header", stream.Headers[0].Name) - } - if stream.Headers[0].Value != "headerValue" { - t.Fatalf("expected header value %s, got %s", "headerValue", stream.Headers[0].Value) - } - _ = stream.WriteHeaders([]Header{ - {Name: "response-header", Value: "responseValue"}, - }) - payload := make([]byte, responseSizes[(stream.streamID-2)/2]) - for i := range payload { - payload[i] = byte(i % 256) - } - n, err := stream.Write(payload) - if err != nil { - t.Fatalf("origin write error: %s", err) - } - if n != len(payload) { - t.Fatalf("origin short write: %d/%d bytes", n, len(payload)) - } - return nil - }) - muxPair := NewDefaultMuxerPair(t, t.Name(), f) - muxPair.Serve(t) - - errGroup, _ := errgroup.WithContext(context.Background()) - for i := 0; i < maxStreams; i++ { - errGroup.Go(func() error { - stream, err := muxPair.OpenEdgeMuxStream( - []Header{{Name: "test-header", Value: "headerValue"}}, - nil, - ) - if err != nil { - return fmt.Errorf("error in OpenStream: %d %s", stream.streamID, err) - } - if len(stream.Headers) != 1 { - return fmt.Errorf("stream %d expected %d headers, got %d", stream.streamID, 1, len(stream.Headers)) - } - if stream.Headers[0].Name != "response-header" { - return fmt.Errorf("stream %d expected header name %s, got %s", stream.streamID, "response-header", stream.Headers[0].Name) - } - if stream.Headers[0].Value != "responseValue" { - return fmt.Errorf("stream %d expected header value %s, got %s", stream.streamID, "responseValue", stream.Headers[0].Value) - } - - responseBody := make([]byte, responseSizes[(stream.streamID-2)/2]) - n, err := io.ReadFull(stream, responseBody) - if 
err != nil { - return fmt.Errorf("stream %d error from (*MuxedStream).Read: %s", stream.streamID, err) - } - if n != len(responseBody) { - return fmt.Errorf("stream %d expected response body to have %d bytes, got %d", stream.streamID, len(responseBody), n) - } - return nil - }) - } - assert.NoError(t, errGroup.Wait()) -} - -func TestGracefulShutdown(t *testing.T) { - sendC := make(chan struct{}) - responseBuf := bytes.Repeat([]byte("Hello world"), 65536) - - f := MuxedStreamFunc(func(stream *MuxedStream) error { - _ = stream.WriteHeaders([]Header{ - {Name: "response-header", Value: "responseValue"}, - }) - <-sendC - log.Debug().Msgf("Writing %d bytes", len(responseBuf)) - _, _ = stream.Write(responseBuf) - _ = stream.CloseWrite() - log.Debug().Msgf("Wrote %d bytes", len(responseBuf)) - // Reading from the stream will block until the edge closes its end of the stream. - // Otherwise, we'll close the whole connection before receiving the 'stream closed' - // message from the edge. - // Graceful shutdown works if you omit this, it just gives spurious errors for now - - // TODO ignore errors when writing 'stream closed' and we're shutting down. 
- _, _ = stream.Read([]byte{0}) - log.Debug().Msgf("Handler ends") - return nil - }) - muxPair := NewDefaultMuxerPair(t, t.Name(), f) - muxPair.Serve(t) - - stream, err := muxPair.OpenEdgeMuxStream( - []Header{{Name: "test-header", Value: "headerValue"}}, - nil, - ) - if err != nil { - t.Fatalf("error in OpenStream: %s", err) - } - // Start graceful shutdown of the edge mux - this should also close the origin mux when done - muxPair.EdgeMux.Shutdown() - close(sendC) - responseBody := make([]byte, len(responseBuf)) - log.Debug().Msgf("Waiting for %d bytes", len(responseBuf)) - n, err := io.ReadFull(stream, responseBody) - if err != nil { - t.Fatalf("error from (*MuxedStream).Read with %d bytes read: %s", n, err) - } - if n != len(responseBody) { - t.Fatalf("expected response body to have %d bytes, got %d", len(responseBody), n) - } - if !bytes.Equal(responseBuf, responseBody) { - t.Fatalf("response body mismatch") - } - _ = stream.Close() - muxPair.Wait(t) -} - -func TestUnexpectedShutdown(t *testing.T) { - sendC := make(chan struct{}) - handlerFinishC := make(chan struct{}) - responseBuf := bytes.Repeat([]byte("Hello world"), 65536) - - f := MuxedStreamFunc(func(stream *MuxedStream) error { - defer close(handlerFinishC) - _ = stream.WriteHeaders([]Header{ - {Name: "response-header", Value: "responseValue"}, - }) - <-sendC - n, err := stream.Read([]byte{0}) - if err != io.EOF { - t.Fatalf("unexpected error from (*MuxedStream).Read: %s", err) - } - if n != 0 { - t.Fatalf("expected empty read, got %d bytes", n) - } - // Write comes after read, because write buffers data before it is flushed. It wouldn't know about EOF - // until some time later. Calling read first forces it to know about EOF now. 
- _, err = stream.Write(responseBuf) - if err != io.EOF { - t.Fatalf("unexpected error from (*MuxedStream).Write: %s", err) - } - return nil - }) - muxPair := NewDefaultMuxerPair(t, t.Name(), f) - muxPair.Serve(t) - - stream, err := muxPair.OpenEdgeMuxStream( - []Header{{Name: "test-header", Value: "headerValue"}}, - nil, - ) - // Close the underlying connection before telling the origin to write. - _ = muxPair.EdgeConn.Close() - close(sendC) - if err != nil { - t.Fatalf("error in OpenStream: %s", err) - } - responseBody := make([]byte, len(responseBuf)) - n, err := io.ReadFull(stream, responseBody) - if err != io.EOF { - t.Fatalf("unexpected error from (*MuxedStream).Read: %s", err) - } - if n != 0 { - t.Fatalf("expected response body to have %d bytes, got %d", 0, n) - } - // The write ordering requirement explained in the origin handler applies here too. - _, err = stream.Write(responseBuf) - if err != io.EOF { - t.Fatalf("unexpected error from (*MuxedStream).Write: %s", err) - } - <-handlerFinishC -} - -func EchoHandler(stream *MuxedStream) error { - var buf bytes.Buffer - _, _ = fmt.Fprintf(&buf, "Hello, world!\n\n# REQUEST HEADERS:\n\n") - for _, header := range stream.Headers { - _, _ = fmt.Fprintf(&buf, "[%s] = %s\n", header.Name, header.Value) - } - _ = stream.WriteHeaders([]Header{ - {Name: ":status", Value: "200"}, - {Name: "server", Value: "Echo-server/1.0"}, - {Name: "date", Value: time.Now().Format(time.RFC850)}, - {Name: "content-type", Value: "text/html; charset=utf-8"}, - {Name: "content-length", Value: strconv.Itoa(buf.Len())}, - }) - _, _ = buf.WriteTo(stream) - return nil -} - -func TestOpenAfterDisconnect(t *testing.T) { - for i := 0; i < 3; i++ { - muxPair := NewDefaultMuxerPair(t, fmt.Sprintf("%s_%d", t.Name(), i), EchoHandler) - muxPair.Serve(t) - - switch i { - case 0: - // Close both directions of the connection to cause EOF on both peers. 
- _ = muxPair.OriginConn.Close() - _ = muxPair.EdgeConn.Close() - case 1: - // Close origin conn to cause EOF on origin first. - _ = muxPair.OriginConn.Close() - case 2: - // Close edge conn to cause EOF on edge first. - _ = muxPair.EdgeConn.Close() - } - - _, err := muxPair.OpenEdgeMuxStream( - []Header{{Name: "test-header", Value: "headerValue"}}, - nil, - ) - if err != ErrStreamRequestConnectionClosed && err != ErrResponseHeadersConnectionClosed { - t.Fatalf("case %v: unexpected error in OpenStream: %v", i, err) - } - } -} - -func TestHPACK(t *testing.T) { - muxPair := NewDefaultMuxerPair(t, t.Name(), EchoHandler) - muxPair.Serve(t) - - stream, err := muxPair.OpenEdgeMuxStream( - []Header{ - {Name: ":method", Value: "RPC"}, - {Name: ":scheme", Value: "capnp"}, - {Name: ":path", Value: "*"}, - }, - nil, - ) - if err != nil { - t.Fatalf("error in OpenStream: %s", err) - } - _ = stream.Close() - - for i := 0; i < 3; i++ { - stream, err := muxPair.OpenEdgeMuxStream( - []Header{ - {Name: ":method", Value: "GET"}, - {Name: ":scheme", Value: "https"}, - {Name: ":authority", Value: "tunnel.otterlyadorable.co.uk"}, - {Name: ":path", Value: "/get"}, - {Name: "accept-encoding", Value: "gzip"}, - {Name: "cf-ray", Value: "378948953f044408-SFO-DOG"}, - {Name: "cf-visitor", Value: "{\"scheme\":\"https\"}"}, - {Name: "cf-connecting-ip", Value: "2400:cb00:0025:010d:0000:0000:0000:0001"}, - {Name: "x-forwarded-for", Value: "2400:cb00:0025:010d:0000:0000:0000:0001"}, - {Name: "x-forwarded-proto", Value: "https"}, - {Name: "accept-language", Value: "en-gb"}, - {Name: "referer", Value: "https://tunnel.otterlyadorable.co.uk/"}, - {Name: "cookie", Value: "__cfduid=d4555095065f92daedc059490771967d81493032162"}, - {Name: "connection", Value: "Keep-Alive"}, - {Name: "cf-ipcountry", Value: "US"}, - {Name: "accept", Value: "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"}, - {Name: "user-agent", Value: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_5) AppleWebKit/603.2.4 
(KHTML, like Gecko) Version/10.1.1 Safari/603.2.4"}, - }, - nil, - ) - if err != nil { - t.Fatalf("error in OpenStream: %s", err) - } - if len(stream.Headers) == 0 { - t.Fatal("response has no headers") - } - if stream.Headers[0].Name != ":status" { - t.Fatalf("first header should be status, found %s instead", stream.Headers[0].Name) - } - if stream.Headers[0].Value != "200" { - t.Fatalf("expected status 200, got %s", stream.Headers[0].Value) - } - _, _ = io.ReadAll(stream) - _ = stream.Close() - } -} - -func AssertIfPipeReadable(t *testing.T, pipe io.ReadCloser) { - errC := make(chan error) - go func() { - b := []byte{0} - n, err := pipe.Read(b) - if n > 0 { - t.Errorf("read pipe was not empty") - return - } - errC <- err - }() - select { - case err := <-errC: - if err != nil { - t.Fatalf("read error: %s", err) - } - case <-time.After(100 * time.Millisecond): - // nothing to read - } -} - -func sampleSiteHandler(files map[string][]byte) MuxedStreamFunc { - return func(stream *MuxedStream) error { - var contentType string - var pathHeader Header - - for _, h := range stream.Headers { - if h.Name == ":path" { - pathHeader = h - break - } - } - - if pathHeader.Name != ":path" { - return fmt.Errorf("Couldn't find :path header in test") - } - - if strings.Contains(pathHeader.Value, "html") { - contentType = "text/html; charset=utf-8" - } else if strings.Contains(pathHeader.Value, "js") { - contentType = "application/javascript" - } else if strings.Contains(pathHeader.Value, "css") { - contentType = "text/css" - } else { - contentType = "img/gif" - } - _ = stream.WriteHeaders([]Header{ - {Name: "content-type", Value: contentType}, - }) - log.Debug().Msgf("Wrote headers for stream %s", pathHeader.Value) - file, ok := files[pathHeader.Value] - if !ok { - return fmt.Errorf("%s content is not preloaded", pathHeader.Value) - } - _, _ = stream.Write(file) - log.Debug().Msgf("Wrote body for stream %s", pathHeader.Value) - return nil - } -} - -func sampleSiteTest(muxPair 
*DefaultMuxerPair, path string, files map[string][]byte) error { - stream, err := muxPair.OpenEdgeMuxStream( - []Header{ - {Name: ":method", Value: "GET"}, - {Name: ":scheme", Value: "https"}, - {Name: ":authority", Value: "tunnel.otterlyadorable.co.uk"}, - {Name: ":path", Value: path}, - {Name: "accept-encoding", Value: "br, gzip"}, - {Name: "cf-ray", Value: "378948953f044408-SFO-DOG"}, - }, - nil, - ) - if err != nil { - return fmt.Errorf("error in OpenStream: %v", err) - } - file, ok := files[path] - if !ok { - return fmt.Errorf("%s content is not preloaded", path) - } - responseBody := make([]byte, len(file)) - n, err := io.ReadFull(stream, responseBody) - if err != nil { - return fmt.Errorf("error from (*MuxedStream).Read: %v", err) - } - if n != len(file) { - return fmt.Errorf("expected response body to have %d bytes, got %d", len(file), n) - } - if string(responseBody[:n]) != string(file) { - return fmt.Errorf("expected response body %s, got %s", file, responseBody[:n]) - } - return nil -} - -func loadSampleFiles(paths []string) (map[string][]byte, error) { - files := make(map[string][]byte) - for _, path := range paths { - if _, ok := files[path]; !ok { - expectBody, err := os.ReadFile(path) - if err != nil { - return nil, err - } - files[path] = expectBody - } - } - return files, nil -} - -func BenchmarkOpenStream(b *testing.B) { - const streams = 5000 - for i := 0; i < b.N; i++ { - b.StopTimer() - f := MuxedStreamFunc(func(stream *MuxedStream) error { - if len(stream.Headers) != 1 { - b.Fatalf("expected %d headers, got %d", 1, len(stream.Headers)) - } - if stream.Headers[0].Name != "test-header" { - b.Fatalf("expected header name %s, got %s", "test-header", stream.Headers[0].Name) - } - if stream.Headers[0].Value != "headerValue" { - b.Fatalf("expected header value %s, got %s", "headerValue", stream.Headers[0].Value) - } - _ = stream.WriteHeaders([]Header{ - {Name: "response-header", Value: "responseValue"}, - }) - return nil - }) - muxPair := 
NewDefaultMuxerPair(b, fmt.Sprintf("%s_%d", b.Name(), i), f) - muxPair.Serve(b) - b.StartTimer() - openStreams(b, muxPair, streams) - } -} - -func openStreams(b *testing.B, muxPair *DefaultMuxerPair, n int) { - errGroup, _ := errgroup.WithContext(context.Background()) - for i := 0; i < n; i++ { - errGroup.Go(func() error { - _, err := muxPair.OpenEdgeMuxStream( - []Header{{Name: "test-header", Value: "headerValue"}}, - nil, - ) - return err - }) - } - assert.NoError(b, errGroup.Wait()) -} - -func BenchmarkSingleStreamLargeResponseBody(b *testing.B) { - const bodySize = 1 << 24 - - const writeBufferSize = 16 << 10 - const writeN = bodySize / writeBufferSize - payload := make([]byte, writeBufferSize) - for i := range payload { - payload[i] = byte(i % 256) - } - - const readBufferSize = 16 << 10 - const readN = bodySize / readBufferSize - responseBody := make([]byte, readBufferSize) - - f := MuxedStreamFunc(func(stream *MuxedStream) error { - if len(stream.Headers) != 1 { - b.Fatalf("expected %d headers, got %d", 1, len(stream.Headers)) - } - if stream.Headers[0].Name != "test-header" { - b.Fatalf("expected header name %s, got %s", "test-header", stream.Headers[0].Name) - } - if stream.Headers[0].Value != "headerValue" { - b.Fatalf("expected header value %s, got %s", "headerValue", stream.Headers[0].Value) - } - _ = stream.WriteHeaders([]Header{ - {Name: "response-header", Value: "responseValue"}, - }) - for i := 0; i < writeN; i++ { - n, err := stream.Write(payload) - if err != nil { - b.Fatalf("origin write error: %s", err) - } - if n != len(payload) { - b.Fatalf("origin short write: %d/%d bytes", n, len(payload)) - } - } - - return nil - }) - - name := fmt.Sprintf("%s_%d", b.Name(), rand.Int()) - origin, edge := net.Pipe() - - muxPair := &DefaultMuxerPair{ - OriginMuxConfig: MuxerConfig{ - Timeout: testHandshakeTimeout, - Handler: f, - IsClient: true, - Name: "origin", - Log: &log, - DefaultWindowSize: defaultWindowSize, - MaxWindowSize: maxWindowSize, - 
StreamWriteBufferMaxLen: defaultWriteBufferMaxLen, - HeartbeatInterval: defaultTimeout, - MaxHeartbeats: defaultRetries, - }, - OriginConn: origin, - EdgeMuxConfig: MuxerConfig{ - Timeout: testHandshakeTimeout, - IsClient: false, - Name: "edge", - Log: &log, - DefaultWindowSize: defaultWindowSize, - MaxWindowSize: maxWindowSize, - StreamWriteBufferMaxLen: defaultWriteBufferMaxLen, - HeartbeatInterval: defaultTimeout, - MaxHeartbeats: defaultRetries, - }, - EdgeConn: edge, - doneC: make(chan struct{}), - } - assert.NoError(b, muxPair.Handshake(name)) - muxPair.Serve(b) - - b.ReportAllocs() - for i := 0; i < b.N; i++ { - stream, err := muxPair.OpenEdgeMuxStream( - []Header{{Name: "test-header", Value: "headerValue"}}, - nil, - ) - if err != nil { - b.Fatalf("error in OpenStream: %s", err) - } - if len(stream.Headers) != 1 { - b.Fatalf("expected %d headers, got %d", 1, len(stream.Headers)) - } - if stream.Headers[0].Name != "response-header" { - b.Fatalf("expected header name %s, got %s", "response-header", stream.Headers[0].Name) - } - if stream.Headers[0].Value != "responseValue" { - b.Fatalf("expected header value %s, got %s", "responseValue", stream.Headers[0].Value) - } - - for k := 0; k < readN; k++ { - n, err := io.ReadFull(stream, responseBody) - if err != nil { - b.Fatalf("error from (*MuxedStream).Read: %s", err) - } - if n != len(responseBody) { - b.Fatalf("expected response body to have %d bytes, got %d", len(responseBody), n) - } - } - } -} diff --git a/h2mux/idletimer.go b/h2mux/idletimer.go deleted file mode 100644 index 6e171801..00000000 --- a/h2mux/idletimer.go +++ /dev/null @@ -1,81 +0,0 @@ -package h2mux - -import ( - "math/rand" - "sync" - "time" -) - -// IdleTimer is a type of Timer designed for managing heartbeats on an idle connection. -// The timer ticks on an interval with added jitter to avoid accidental synchronisation -// between two endpoints. It tracks the number of retries/ticks since the connection was -// last marked active. 
-// -// The methods of IdleTimer must not be called while a goroutine is reading from C. -type IdleTimer struct { - // The channel on which ticks are delivered. - C <-chan time.Time - - // A timer used to measure idle connection time. Reset after sending data. - idleTimer *time.Timer - // The maximum length of time a connection is idle before sending a ping. - idleDuration time.Duration - // A pseudorandom source used to add jitter to the idle duration. - randomSource *rand.Rand - // The maximum number of retries allowed. - maxRetries uint64 - // The number of retries since the connection was last marked active. - retries uint64 - // A lock to prevent race condition while checking retries - stateLock sync.RWMutex -} - -func NewIdleTimer(idleDuration time.Duration, maxRetries uint64) *IdleTimer { - t := &IdleTimer{ - idleTimer: time.NewTimer(idleDuration), - idleDuration: idleDuration, - randomSource: rand.New(rand.NewSource(time.Now().Unix())), - maxRetries: maxRetries, - } - t.C = t.idleTimer.C - return t -} - -// Retry should be called when retrying the idle timeout. If the maximum number of retries -// has been met, returns false. -// After calling this function and sending a heartbeat, call ResetTimer. Since sending the -// heartbeat could be a blocking operation, we resetting the timer after the write completes -// to avoid it expiring during the write. -func (t *IdleTimer) Retry() bool { - t.stateLock.Lock() - defer t.stateLock.Unlock() - if t.retries >= t.maxRetries { - return false - } - t.retries++ - return true -} - -func (t *IdleTimer) RetryCount() uint64 { - t.stateLock.RLock() - defer t.stateLock.RUnlock() - return t.retries -} - -// MarkActive resets the idle connection timer and suppresses any outstanding idle events. 
-func (t *IdleTimer) MarkActive() { - if !t.idleTimer.Stop() { - // eat the timer event to prevent spurious pings - <-t.idleTimer.C - } - t.stateLock.Lock() - t.retries = 0 - t.stateLock.Unlock() - t.ResetTimer() -} - -// Reset the idle timer according to the configured duration, with some added jitter. -func (t *IdleTimer) ResetTimer() { - jitter := time.Duration(t.randomSource.Int63n(int64(t.idleDuration))) - t.idleTimer.Reset(t.idleDuration + jitter) -} diff --git a/h2mux/idletimer_test.go b/h2mux/idletimer_test.go deleted file mode 100644 index 92f2b2a3..00000000 --- a/h2mux/idletimer_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package h2mux - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestRetry(t *testing.T) { - timer := NewIdleTimer(time.Second, 2) - assert.Equal(t, uint64(0), timer.RetryCount()) - ok := timer.Retry() - assert.True(t, ok) - assert.Equal(t, uint64(1), timer.RetryCount()) - ok = timer.Retry() - assert.True(t, ok) - assert.Equal(t, uint64(2), timer.RetryCount()) - ok = timer.Retry() - assert.False(t, ok) -} - -func TestMarkActive(t *testing.T) { - timer := NewIdleTimer(time.Second, 2) - assert.Equal(t, uint64(0), timer.RetryCount()) - ok := timer.Retry() - assert.True(t, ok) - assert.Equal(t, uint64(1), timer.RetryCount()) - timer.MarkActive() - assert.Equal(t, uint64(0), timer.RetryCount()) -} diff --git a/h2mux/muxedstream.go b/h2mux/muxedstream.go deleted file mode 100644 index 2e75735f..00000000 --- a/h2mux/muxedstream.go +++ /dev/null @@ -1,457 +0,0 @@ -package h2mux - -import ( - "bytes" - "io" - "sync" -) - -type ReadWriteLengther interface { - io.ReadWriter - Reset() - Len() int -} - -type ReadWriteClosedCloser interface { - io.ReadWriteCloser - Closed() bool -} - -// MuxedStreamDataSignaller is a write-only *ReadyList -type MuxedStreamDataSignaller interface { - // Non-blocking: call this when data is ready to be sent for the given stream ID. 
- Signal(ID uint32) -} - -type Header struct { - Name, Value string -} - -// MuxedStream is logically an HTTP/2 stream, with an additional buffer for outgoing data. -type MuxedStream struct { - streamID uint32 - - // The "Receive" end of the stream - readBufferLock sync.RWMutex - readBuffer ReadWriteClosedCloser - // This is the amount of bytes that are in our receive window - // (how much data we can receive into this stream). - receiveWindow uint32 - // current receive window size limit. Exponentially increase it when it's exhausted - receiveWindowCurrentMax uint32 - // hard limit set in http2 spec. 2^31-1 - receiveWindowMax uint32 - // The desired size increment for receiveWindow. - // If this is nonzero, a WINDOW_UPDATE frame needs to be sent. - windowUpdate uint32 - // The headers that were most recently received. - // Particularly: - // * for an eyeball-initiated stream (as passed to TunnelHandler::ServeStream), - // these are the request headers - // * for a cloudflared-initiated stream (as created by Register/UnregisterTunnel), - // these are the response headers. - // They are useful in both of these contexts; hence `Headers` is public. - Headers []Header - // For use in the context of a cloudflared-initiated stream. - responseHeadersReceived chan struct{} - - // The "Send" end of the stream - writeLock sync.Mutex - writeBuffer ReadWriteLengther - // The maximum capacity that the send buffer should grow to. - writeBufferMaxLen int - // A channel to be notified when the send buffer is not full. - writeBufferHasSpace chan struct{} - // This is the amount of bytes that are in the peer's receive window - // (how much data we can send from this stream). - sendWindow uint32 - // The muxer's readyList - readyList MuxedStreamDataSignaller - // The headers that should be sent, and a flag so we only send them once. 
- headersSent bool - writeHeaders []Header - - // EOF-related fields - // true if the write end of this stream has been closed - writeEOF bool - // true if we have sent EOF to the peer - sentEOF bool - // true if the peer sent us an EOF - receivedEOF bool - // Compression-related fields - receivedUseDict bool - method string - contentType string - path string - dictionaries h2Dictionaries -} - -type TunnelHostname string - -func (th TunnelHostname) String() string { - return string(th) -} - -func (th TunnelHostname) IsSet() bool { - return th != "" -} - -func NewStream(config MuxerConfig, writeHeaders []Header, readyList MuxedStreamDataSignaller, dictionaries h2Dictionaries) *MuxedStream { - return &MuxedStream{ - responseHeadersReceived: make(chan struct{}), - readBuffer: NewSharedBuffer(), - writeBuffer: &bytes.Buffer{}, - writeBufferMaxLen: config.StreamWriteBufferMaxLen, - writeBufferHasSpace: make(chan struct{}, 1), - receiveWindow: config.DefaultWindowSize, - receiveWindowCurrentMax: config.DefaultWindowSize, - receiveWindowMax: config.MaxWindowSize, - sendWindow: config.DefaultWindowSize, - readyList: readyList, - writeHeaders: writeHeaders, - dictionaries: dictionaries, - } -} - -func (s *MuxedStream) Read(p []byte) (n int, err error) { - var readBuffer ReadWriteClosedCloser - if s.dictionaries.read != nil { - s.readBufferLock.RLock() - readBuffer = s.readBuffer - s.readBufferLock.RUnlock() - } else { - readBuffer = s.readBuffer - } - n, err = readBuffer.Read(p) - s.replenishReceiveWindow(uint32(n)) - return -} - -// Blocks until len(p) bytes have been written to the buffer -func (s *MuxedStream) Write(p []byte) (int, error) { - // If assignDictToStream returns success, then it will have acquired the - // writeLock. Otherwise we must acquire it ourselves. 
- ok := assignDictToStream(s, p) - if !ok { - s.writeLock.Lock() - } - defer s.writeLock.Unlock() - - if s.writeEOF { - return 0, io.EOF - } - - // pre-allocate some space in the write buffer if possible - if buffer, ok := s.writeBuffer.(*bytes.Buffer); ok { - if buffer.Cap() == 0 { - buffer.Grow(writeBufferInitialSize) - } - } - - totalWritten := 0 - for totalWritten < len(p) { - // If the buffer is full, block till there is more room. - // Use a loop to recheck the buffer size after the lock is reacquired. - for s.writeBufferMaxLen <= s.writeBuffer.Len() { - s.awaitWriteBufferHasSpace() - if s.writeEOF { - return totalWritten, io.EOF - } - } - amountToWrite := len(p) - totalWritten - spaceAvailable := s.writeBufferMaxLen - s.writeBuffer.Len() - if spaceAvailable < amountToWrite { - amountToWrite = spaceAvailable - } - amountWritten, err := s.writeBuffer.Write(p[totalWritten : totalWritten+amountToWrite]) - totalWritten += amountWritten - if err != nil { - return totalWritten, err - } - s.writeNotify() - } - return totalWritten, nil -} - -func (s *MuxedStream) Close() error { - // TUN-115: Close the write buffer before the read buffer. - // In the case of shutdown, read will not get new data, but the write buffer can still receive - // new data. Closing read before write allows application to race between a failed read and a - // successful write, even though this close should appear to be atomic. - // This can't happen the other way because reads may succeed after a failed write; if we read - // past EOF the application will block until we close the buffer. 
- err := s.CloseWrite() - if err != nil { - if s.CloseRead() == nil { - // don't bother the caller with errors if at least one close succeeded - return nil - } - return err - } - return s.CloseRead() -} - -func (s *MuxedStream) CloseRead() error { - return s.readBuffer.Close() -} - -func (s *MuxedStream) CloseWrite() error { - s.writeLock.Lock() - defer s.writeLock.Unlock() - if s.writeEOF { - return io.EOF - } - s.writeEOF = true - if c, ok := s.writeBuffer.(io.Closer); ok { - c.Close() - } - // Allow MuxedStream::Write() to terminate its loop with err=io.EOF, if needed - s.notifyWriteBufferHasSpace() - // We need to send something over the wire, even if it's an END_STREAM with no data - s.writeNotify() - return nil -} - -func (s *MuxedStream) WriteClosed() bool { - s.writeLock.Lock() - defer s.writeLock.Unlock() - return s.writeEOF -} - -func (s *MuxedStream) WriteHeaders(headers []Header) error { - s.writeLock.Lock() - defer s.writeLock.Unlock() - if s.writeHeaders != nil { - return ErrStreamHeadersSent - } - - if s.dictionaries.write != nil { - dictWriter := s.dictionaries.write.getDictWriter(s, headers) - if dictWriter != nil { - s.writeBuffer = dictWriter - } - - } - - s.writeHeaders = headers - s.headersSent = false - s.writeNotify() - return nil -} - -// IsRPCStream returns if the stream is used to transport RPC. -func (s *MuxedStream) IsRPCStream() bool { - rpcHeaders := RPCHeaders() - if len(s.Headers) != len(rpcHeaders) { - return false - } - // The headers order matters, so RPC stream should be opened with OpenRPCStream method and let MuxWriter serializes the headers. - for i, rpcHeader := range rpcHeaders { - if s.Headers[i] != rpcHeader { - return false - } - } - return true -} - -// Block until a value is sent on writeBufferHasSpace. 
-// Must be called while holding writeLock -func (s *MuxedStream) awaitWriteBufferHasSpace() { - s.writeLock.Unlock() - <-s.writeBufferHasSpace - s.writeLock.Lock() -} - -// Send a value on writeBufferHasSpace without blocking. -// Must be called while holding writeLock -func (s *MuxedStream) notifyWriteBufferHasSpace() { - select { - case s.writeBufferHasSpace <- struct{}{}: - default: - } -} - -func (s *MuxedStream) getReceiveWindow() uint32 { - s.writeLock.Lock() - defer s.writeLock.Unlock() - return s.receiveWindow -} - -func (s *MuxedStream) getSendWindow() uint32 { - s.writeLock.Lock() - defer s.writeLock.Unlock() - return s.sendWindow -} - -// writeNotify must happen while holding writeLock. -func (s *MuxedStream) writeNotify() { - s.readyList.Signal(s.streamID) -} - -// Call by muxreader when it gets a WindowUpdateFrame. This is an update of the peer's -// receive window (how much data we can send). -func (s *MuxedStream) replenishSendWindow(bytes uint32) { - s.writeLock.Lock() - defer s.writeLock.Unlock() - s.sendWindow += bytes - s.writeNotify() -} - -// Call by muxreader when it receives a data frame -func (s *MuxedStream) consumeReceiveWindow(bytes uint32) bool { - s.writeLock.Lock() - defer s.writeLock.Unlock() - // received data size is greater than receive window/buffer - if s.receiveWindow < bytes { - return false - } - s.receiveWindow -= bytes - if s.receiveWindow < s.receiveWindowCurrentMax/2 && s.receiveWindowCurrentMax < s.receiveWindowMax { - // exhausting client send window (how much data client can send) - // and there is room to grow the receive window - newMax := s.receiveWindowCurrentMax << 1 - if newMax > s.receiveWindowMax { - newMax = s.receiveWindowMax - } - s.windowUpdate += newMax - s.receiveWindowCurrentMax - s.receiveWindowCurrentMax = newMax - // notify MuxWriter to write WINDOW_UPDATE frame - s.writeNotify() - } - return true -} - -// Arranges for the MuxWriter to send a WINDOW_UPDATE -// Called by MuxedStream::Read when data has 
left the read buffer. -func (s *MuxedStream) replenishReceiveWindow(bytes uint32) { - s.writeLock.Lock() - defer s.writeLock.Unlock() - s.windowUpdate += bytes - s.writeNotify() -} - -// receiveEOF should be called when the peer indicates no more data will be sent. -// Returns true if the socket is now closed (i.e. the write side is already closed). -func (s *MuxedStream) receiveEOF() (closed bool) { - s.writeLock.Lock() - defer s.writeLock.Unlock() - s.receivedEOF = true - s.CloseRead() - return s.writeEOF && s.writeBuffer.Len() == 0 -} - -func (s *MuxedStream) gotReceiveEOF() bool { - s.writeLock.Lock() - defer s.writeLock.Unlock() - return s.receivedEOF -} - -// MuxedStreamReader implements io.ReadCloser for the read end of the stream. -// This is useful for passing to functions that close the object after it is done reading, -// but you still want to be able to write data afterwards (e.g. http.Client). -type MuxedStreamReader struct { - *MuxedStream -} - -func (s MuxedStreamReader) Read(p []byte) (n int, err error) { - return s.MuxedStream.Read(p) -} - -func (s MuxedStreamReader) Close() error { - return s.MuxedStream.CloseRead() -} - -// streamChunk represents a chunk of data to be written. -type streamChunk struct { - streamID uint32 - // true if a HEADERS frame should be sent - sendHeaders bool - headers []Header - // nonzero if a WINDOW_UPDATE frame should be sent; - // in that case, it is the increment value to use - windowUpdate uint32 - // true if data frames should be sent - sendData bool - eof bool - - buffer []byte - offset int -} - -// getChunk atomically extracts a chunk of data to be written by MuxWriter. -// The data returned will not exceed the send window for this stream. 
-func (s *MuxedStream) getChunk() *streamChunk { - s.writeLock.Lock() - defer s.writeLock.Unlock() - - chunk := &streamChunk{ - streamID: s.streamID, - sendHeaders: !s.headersSent, - headers: s.writeHeaders, - windowUpdate: s.windowUpdate, - sendData: !s.sentEOF, - eof: s.writeEOF && uint32(s.writeBuffer.Len()) <= s.sendWindow, - } - // Copy at most s.sendWindow bytes, adjust the sendWindow accordingly - toCopy := int(s.sendWindow) - if toCopy > s.writeBuffer.Len() { - toCopy = s.writeBuffer.Len() - } - - if toCopy > 0 { - buf := make([]byte, toCopy) - writeLen, _ := s.writeBuffer.Read(buf) - chunk.buffer = buf[:writeLen] - s.sendWindow -= uint32(writeLen) - } - - // Allow MuxedStream::Write() to continue, if needed - if s.writeBuffer.Len() < s.writeBufferMaxLen { - s.notifyWriteBufferHasSpace() - } - - // When we write the chunk, we'll write the WINDOW_UPDATE frame if needed - s.receiveWindow += s.windowUpdate - s.windowUpdate = 0 - - // When we write the chunk, we'll write the headers if needed - s.headersSent = true - - // if this chunk contains the end of the stream, close the stream now - if chunk.sendData && chunk.eof { - s.sentEOF = true - } - - return chunk -} - -func (c *streamChunk) sendHeadersFrame() bool { - return c.sendHeaders -} - -func (c *streamChunk) sendWindowUpdateFrame() bool { - return c.windowUpdate > 0 -} - -func (c *streamChunk) sendDataFrame() bool { - return c.sendData -} - -func (c *streamChunk) nextDataFrame(frameSize int) (payload []byte, endStream bool) { - bytesLeft := len(c.buffer) - c.offset - if frameSize > bytesLeft { - frameSize = bytesLeft - } - nextOffset := c.offset + frameSize - payload = c.buffer[c.offset:nextOffset] - c.offset = nextOffset - - if c.offset == len(c.buffer) { - // this is the last data frame in this chunk - c.sendData = false - if c.eof { - endStream = true - } - } - return -} diff --git a/h2mux/muxedstream_test.go b/h2mux/muxedstream_test.go deleted file mode 100644 index b0e0ac13..00000000 --- 
a/h2mux/muxedstream_test.go +++ /dev/null @@ -1,127 +0,0 @@ -package h2mux - -import ( - "bytes" - "io" - "testing" - - "github.com/stretchr/testify/assert" -) - -const testWindowSize uint32 = 65535 -const testMaxWindowSize uint32 = testWindowSize << 2 - -// Only sending WINDOW_UPDATE frame, so sendWindow should never change -func TestFlowControlSingleStream(t *testing.T) { - stream := &MuxedStream{ - responseHeadersReceived: make(chan struct{}), - readBuffer: NewSharedBuffer(), - writeBuffer: &bytes.Buffer{}, - receiveWindow: testWindowSize, - receiveWindowCurrentMax: testWindowSize, - receiveWindowMax: testMaxWindowSize, - sendWindow: testWindowSize, - readyList: NewReadyList(), - } - var tempWindowUpdate uint32 - var tempStreamChunk *streamChunk - - assert.True(t, stream.consumeReceiveWindow(testWindowSize/2)) - dataSent := testWindowSize / 2 - assert.Equal(t, testWindowSize-dataSent, stream.receiveWindow) - assert.Equal(t, testWindowSize, stream.receiveWindowCurrentMax) - assert.Equal(t, testWindowSize, stream.sendWindow) - assert.Equal(t, uint32(0), stream.windowUpdate) - - tempStreamChunk = stream.getChunk() - assert.Equal(t, uint32(0), tempStreamChunk.windowUpdate) - assert.Equal(t, testWindowSize-dataSent, stream.receiveWindow) - assert.Equal(t, testWindowSize, stream.receiveWindowCurrentMax) - assert.Equal(t, testWindowSize, stream.sendWindow) - assert.Equal(t, uint32(0), stream.windowUpdate) - - assert.True(t, stream.consumeReceiveWindow(2)) - dataSent += 2 - assert.Equal(t, testWindowSize-dataSent, stream.receiveWindow) - assert.Equal(t, testWindowSize<<1, stream.receiveWindowCurrentMax) - assert.Equal(t, testWindowSize, stream.sendWindow) - assert.Equal(t, testWindowSize, stream.windowUpdate) - tempWindowUpdate = stream.windowUpdate - - tempStreamChunk = stream.getChunk() - assert.Equal(t, tempWindowUpdate, tempStreamChunk.windowUpdate) - assert.Equal(t, (testWindowSize<<1)-dataSent, stream.receiveWindow) - assert.Equal(t, testWindowSize<<1, 
stream.receiveWindowCurrentMax) - assert.Equal(t, testWindowSize, stream.sendWindow) - assert.Equal(t, uint32(0), stream.windowUpdate) - - assert.True(t, stream.consumeReceiveWindow(testWindowSize+10)) - dataSent += testWindowSize + 10 - assert.Equal(t, (testWindowSize<<1)-dataSent, stream.receiveWindow) - assert.Equal(t, testWindowSize<<2, stream.receiveWindowCurrentMax) - assert.Equal(t, testWindowSize, stream.sendWindow) - assert.Equal(t, testWindowSize<<1, stream.windowUpdate) - tempWindowUpdate = stream.windowUpdate - - tempStreamChunk = stream.getChunk() - assert.Equal(t, tempWindowUpdate, tempStreamChunk.windowUpdate) - assert.Equal(t, (testWindowSize<<2)-dataSent, stream.receiveWindow) - assert.Equal(t, testWindowSize<<2, stream.receiveWindowCurrentMax) - assert.Equal(t, testWindowSize, stream.sendWindow) - assert.Equal(t, uint32(0), stream.windowUpdate) - - assert.False(t, stream.consumeReceiveWindow(testMaxWindowSize+1)) - assert.Equal(t, (testWindowSize<<2)-dataSent, stream.receiveWindow) - assert.Equal(t, testMaxWindowSize, stream.receiveWindowCurrentMax) -} - -func TestMuxedStreamEOF(t *testing.T) { - for i := 0; i < 4096; i++ { - readyList := NewReadyList() - stream := &MuxedStream{ - streamID: 1, - readBuffer: NewSharedBuffer(), - receiveWindow: 65536, - receiveWindowMax: 65536, - sendWindow: 65536, - readyList: readyList, - } - - go func() { stream.Close() }() - n, err := stream.Read([]byte{0}) - assert.Equal(t, io.EOF, err) - assert.Equal(t, 0, n) - // Write comes after read, because write buffers data before it is flushed. It wouldn't know about EOF - // until some time later. Calling read first forces it to know about EOF now. 
- n, err = stream.Write([]byte{1}) - assert.Equal(t, io.EOF, err) - assert.Equal(t, 0, n) - } -} - -func TestIsRPCStream(t *testing.T) { - tests := []struct { - stream *MuxedStream - isRPCStream bool - }{ - { - stream: &MuxedStream{}, - isRPCStream: false, - }, - { - stream: &MuxedStream{Headers: RPCHeaders()}, - isRPCStream: true, - }, - { - stream: &MuxedStream{Headers: []Header{ - {Name: ":method", Value: "rpc"}, - {Name: ":scheme", Value: "Capnp"}, - {Name: ":path", Value: "/"}, - }}, - isRPCStream: false, - }, - } - for _, test := range tests { - assert.Equal(t, test.isRPCStream, test.stream.IsRPCStream()) - } -} diff --git a/h2mux/muxmetrics.go b/h2mux/muxmetrics.go deleted file mode 100644 index 3423bde0..00000000 --- a/h2mux/muxmetrics.go +++ /dev/null @@ -1,296 +0,0 @@ -package h2mux - -import ( - "sync" - "time" - - "github.com/golang-collections/collections/queue" - "github.com/rs/zerolog" -) - -// data points used to compute average receive window and send window size -const ( - // data points used to compute average receive window and send window size - dataPoints = 100 - // updateFreq is set to 1 sec so we can get inbound & outbound byes/sec - updateFreq = time.Second -) - -type muxMetricsUpdater interface { - // metrics returns the latest metrics - metrics() *MuxerMetrics - // run is a blocking call to start the event loop - run(log *zerolog.Logger) error - // updateRTTChan is called by muxReader to report new RTT measurements - updateRTT(rtt *roundTripMeasurement) - //updateReceiveWindowChan is called by muxReader and muxWriter when receiveWindow size is updated - updateReceiveWindow(receiveWindow uint32) - //updateSendWindowChan is called by muxReader and muxWriter when sendWindow size is updated - updateSendWindow(sendWindow uint32) - // updateInBoundBytesChan is called periodicallyby muxReader to report bytesRead - updateInBoundBytes(inBoundBytes uint64) - // updateOutBoundBytesChan is called periodically by muxWriter to report bytesWrote - 
updateOutBoundBytes(outBoundBytes uint64) -} - -type muxMetricsUpdaterImpl struct { - // rttData keeps record of rtt, rttMin, rttMax and last measured time - rttData *rttData - // receiveWindowData keeps record of receive window measurement - receiveWindowData *flowControlData - // sendWindowData keeps record of send window measurement - sendWindowData *flowControlData - // inBoundRate is incoming bytes/sec - inBoundRate *rate - // outBoundRate is outgoing bytes/sec - outBoundRate *rate - // updateRTTChan is the channel to receive new RTT measurement - updateRTTChan chan *roundTripMeasurement - //updateReceiveWindowChan is the channel to receive updated receiveWindow size - updateReceiveWindowChan chan uint32 - //updateSendWindowChan is the channel to receive updated sendWindow size - updateSendWindowChan chan uint32 - // updateInBoundBytesChan us the channel to receive bytesRead - updateInBoundBytesChan chan uint64 - // updateOutBoundBytesChan us the channel to receive bytesWrote - updateOutBoundBytesChan chan uint64 - // shutdownC is to signal the muxerMetricsUpdater to shutdown - abortChan <-chan struct{} - - compBytesBefore, compBytesAfter *AtomicCounter -} - -type MuxerMetrics struct { - RTT, RTTMin, RTTMax time.Duration - ReceiveWindowAve, SendWindowAve float64 - ReceiveWindowMin, ReceiveWindowMax, SendWindowMin, SendWindowMax uint32 - InBoundRateCurr, InBoundRateMin, InBoundRateMax uint64 - OutBoundRateCurr, OutBoundRateMin, OutBoundRateMax uint64 - CompBytesBefore, CompBytesAfter *AtomicCounter -} - -func (m *MuxerMetrics) CompRateAve() float64 { - if m.CompBytesBefore.Value() == 0 { - return 1. 
- } - return float64(m.CompBytesAfter.Value()) / float64(m.CompBytesBefore.Value()) -} - -type roundTripMeasurement struct { - receiveTime, sendTime time.Time -} - -type rttData struct { - rtt, rttMin, rttMax time.Duration - lastMeasurementTime time.Time - lock sync.RWMutex -} - -type flowControlData struct { - sum uint64 - min, max uint32 - queue *queue.Queue - lock sync.RWMutex -} - -type rate struct { - curr uint64 - min, max uint64 - lock sync.RWMutex -} - -func newMuxMetricsUpdater( - abortChan <-chan struct{}, - compBytesBefore, compBytesAfter *AtomicCounter, -) muxMetricsUpdater { - updateRTTChan := make(chan *roundTripMeasurement, 1) - updateReceiveWindowChan := make(chan uint32, 1) - updateSendWindowChan := make(chan uint32, 1) - updateInBoundBytesChan := make(chan uint64) - updateOutBoundBytesChan := make(chan uint64) - - return &muxMetricsUpdaterImpl{ - rttData: newRTTData(), - receiveWindowData: newFlowControlData(), - sendWindowData: newFlowControlData(), - inBoundRate: newRate(), - outBoundRate: newRate(), - updateRTTChan: updateRTTChan, - updateReceiveWindowChan: updateReceiveWindowChan, - updateSendWindowChan: updateSendWindowChan, - updateInBoundBytesChan: updateInBoundBytesChan, - updateOutBoundBytesChan: updateOutBoundBytesChan, - abortChan: abortChan, - compBytesBefore: compBytesBefore, - compBytesAfter: compBytesAfter, - } -} - -func (updater *muxMetricsUpdaterImpl) metrics() *MuxerMetrics { - m := &MuxerMetrics{} - m.RTT, m.RTTMin, m.RTTMax = updater.rttData.metrics() - m.ReceiveWindowAve, m.ReceiveWindowMin, m.ReceiveWindowMax = updater.receiveWindowData.metrics() - m.SendWindowAve, m.SendWindowMin, m.SendWindowMax = updater.sendWindowData.metrics() - m.InBoundRateCurr, m.InBoundRateMin, m.InBoundRateMax = updater.inBoundRate.get() - m.OutBoundRateCurr, m.OutBoundRateMin, m.OutBoundRateMax = updater.outBoundRate.get() - m.CompBytesBefore, m.CompBytesAfter = updater.compBytesBefore, updater.compBytesAfter - return m -} - -func (updater 
*muxMetricsUpdaterImpl) run(log *zerolog.Logger) error { - defer log.Debug().Msg("mux - metrics: event loop finished") - for { - select { - case <-updater.abortChan: - log.Debug().Msgf("mux - metrics: Stopping mux metrics updater") - return nil - case roundTripMeasurement := <-updater.updateRTTChan: - go updater.rttData.update(roundTripMeasurement) - log.Debug().Msg("mux - metrics: Update rtt") - case receiveWindow := <-updater.updateReceiveWindowChan: - go updater.receiveWindowData.update(receiveWindow) - log.Debug().Msg("mux - metrics: Update receive window") - case sendWindow := <-updater.updateSendWindowChan: - go updater.sendWindowData.update(sendWindow) - log.Debug().Msg("mux - metrics: Update send window") - case inBoundBytes := <-updater.updateInBoundBytesChan: - // inBoundBytes is bytes/sec because the update interval is 1 sec - go updater.inBoundRate.update(inBoundBytes) - log.Debug().Msgf("mux - metrics: Inbound bytes %d", inBoundBytes) - case outBoundBytes := <-updater.updateOutBoundBytesChan: - // outBoundBytes is bytes/sec because the update interval is 1 sec - go updater.outBoundRate.update(outBoundBytes) - log.Debug().Msgf("mux - metrics: Outbound bytes %d", outBoundBytes) - } - } -} - -func (updater *muxMetricsUpdaterImpl) updateRTT(rtt *roundTripMeasurement) { - select { - case updater.updateRTTChan <- rtt: - case <-updater.abortChan: - } - -} - -func (updater *muxMetricsUpdaterImpl) updateReceiveWindow(receiveWindow uint32) { - select { - case updater.updateReceiveWindowChan <- receiveWindow: - case <-updater.abortChan: - } -} - -func (updater *muxMetricsUpdaterImpl) updateSendWindow(sendWindow uint32) { - select { - case updater.updateSendWindowChan <- sendWindow: - case <-updater.abortChan: - } -} - -func (updater *muxMetricsUpdaterImpl) updateInBoundBytes(inBoundBytes uint64) { - select { - case updater.updateInBoundBytesChan <- inBoundBytes: - case <-updater.abortChan: - } - -} - -func (updater *muxMetricsUpdaterImpl) 
updateOutBoundBytes(outBoundBytes uint64) { - select { - case updater.updateOutBoundBytesChan <- outBoundBytes: - case <-updater.abortChan: - } -} - -func newRTTData() *rttData { - return &rttData{} -} - -func (r *rttData) update(measurement *roundTripMeasurement) { - r.lock.Lock() - defer r.lock.Unlock() - // discard pings before lastMeasurementTime - if r.lastMeasurementTime.After(measurement.sendTime) { - return - } - r.lastMeasurementTime = measurement.sendTime - r.rtt = measurement.receiveTime.Sub(measurement.sendTime) - if r.rttMax < r.rtt { - r.rttMax = r.rtt - } - if r.rttMin == 0 || r.rttMin > r.rtt { - r.rttMin = r.rtt - } -} - -func (r *rttData) metrics() (rtt, rttMin, rttMax time.Duration) { - r.lock.RLock() - defer r.lock.RUnlock() - return r.rtt, r.rttMin, r.rttMax -} - -func newFlowControlData() *flowControlData { - return &flowControlData{queue: queue.New()} -} - -func (f *flowControlData) update(measurement uint32) { - f.lock.Lock() - defer f.lock.Unlock() - var firstItem uint32 - // store new data into queue, remove oldest data if queue is full - f.queue.Enqueue(measurement) - if f.queue.Len() > dataPoints { - // data type should always be uint32 - firstItem = f.queue.Dequeue().(uint32) - } - // if (measurement - firstItem) < 0, uint64(measurement - firstItem) - // will overflow and become a large positive number - f.sum += uint64(measurement) - f.sum -= uint64(firstItem) - if measurement > f.max { - f.max = measurement - } - if f.min == 0 || measurement < f.min { - f.min = measurement - } -} - -// caller of ave() should acquire lock first -func (f *flowControlData) ave() float64 { - if f.queue.Len() == 0 { - return 0 - } - return float64(f.sum) / float64(f.queue.Len()) -} - -func (f *flowControlData) metrics() (ave float64, min, max uint32) { - f.lock.RLock() - defer f.lock.RUnlock() - return f.ave(), f.min, f.max -} - -func newRate() *rate { - return &rate{} -} - -func (r *rate) update(measurement uint64) { - r.lock.Lock() - defer 
r.lock.Unlock() - r.curr = measurement - // if measurement is 0, then there is no incoming/outgoing connection, don't update min/max - if r.curr == 0 { - return - } - if measurement > r.max { - r.max = measurement - } - if r.min == 0 || measurement < r.min { - r.min = measurement - } -} - -func (r *rate) get() (curr, min, max uint64) { - r.lock.RLock() - defer r.lock.RUnlock() - return r.curr, r.min, r.max -} diff --git a/h2mux/muxmetrics_test.go b/h2mux/muxmetrics_test.go deleted file mode 100644 index a9213a2c..00000000 --- a/h2mux/muxmetrics_test.go +++ /dev/null @@ -1,169 +0,0 @@ -package h2mux - -import ( - "sync" - "testing" - "time" - - "github.com/rs/zerolog" - "github.com/stretchr/testify/assert" -) - -func ave(sum uint64, len int) float64 { - return float64(sum) / float64(len) -} - -func TestRTTUpdate(t *testing.T) { - r := newRTTData() - start := time.Now() - // send at 0 ms, receive at 2 ms, RTT = 2ms - m := &roundTripMeasurement{receiveTime: start.Add(2 * time.Millisecond), sendTime: start} - r.update(m) - assert.Equal(t, start, r.lastMeasurementTime) - assert.Equal(t, 2*time.Millisecond, r.rtt) - assert.Equal(t, 2*time.Millisecond, r.rttMin) - assert.Equal(t, 2*time.Millisecond, r.rttMax) - - // send at 3 ms, receive at 6 ms, RTT = 3ms - m = &roundTripMeasurement{receiveTime: start.Add(6 * time.Millisecond), sendTime: start.Add(3 * time.Millisecond)} - r.update(m) - assert.Equal(t, start.Add(3*time.Millisecond), r.lastMeasurementTime) - assert.Equal(t, 3*time.Millisecond, r.rtt) - assert.Equal(t, 2*time.Millisecond, r.rttMin) - assert.Equal(t, 3*time.Millisecond, r.rttMax) - - // send at 7 ms, receive at 8 ms, RTT = 1ms - m = &roundTripMeasurement{receiveTime: start.Add(8 * time.Millisecond), sendTime: start.Add(7 * time.Millisecond)} - r.update(m) - assert.Equal(t, start.Add(7*time.Millisecond), r.lastMeasurementTime) - assert.Equal(t, 1*time.Millisecond, r.rtt) - assert.Equal(t, 1*time.Millisecond, r.rttMin) - assert.Equal(t, 3*time.Millisecond, 
r.rttMax) - - // send at -4 ms, receive at 0 ms, RTT = 4ms, but this ping is before last measurement - // so it will be discarded - m = &roundTripMeasurement{receiveTime: start, sendTime: start.Add(-2 * time.Millisecond)} - r.update(m) - assert.Equal(t, start.Add(7*time.Millisecond), r.lastMeasurementTime) - assert.Equal(t, 1*time.Millisecond, r.rtt) - assert.Equal(t, 1*time.Millisecond, r.rttMin) - assert.Equal(t, 3*time.Millisecond, r.rttMax) -} - -func TestFlowControlDataUpdate(t *testing.T) { - f := newFlowControlData() - assert.Equal(t, 0, f.queue.Len()) - assert.Equal(t, float64(0), f.ave()) - - var sum uint64 - min := maxWindowSize - dataPoints - max := maxWindowSize - for i := 1; i <= dataPoints; i++ { - size := maxWindowSize - uint32(i) - f.update(size) - assert.Equal(t, max-uint32(1), f.max) - assert.Equal(t, size, f.min) - - assert.Equal(t, i, f.queue.Len()) - - sum += uint64(size) - assert.Equal(t, sum, f.sum) - assert.Equal(t, ave(sum, f.queue.Len()), f.ave()) - } - - // queue is full, should start to dequeue first element - for i := 1; i <= dataPoints; i++ { - f.update(max) - assert.Equal(t, max, f.max) - assert.Equal(t, min, f.min) - - assert.Equal(t, dataPoints, f.queue.Len()) - - sum += uint64(i) - assert.Equal(t, sum, f.sum) - assert.Equal(t, ave(sum, dataPoints), f.ave()) - } -} - -func TestMuxMetricsUpdater(t *testing.T) { - t.Skip("Inherently racy test due to muxMetricsUpdaterImpl.run()") - errChan := make(chan error) - abortChan := make(chan struct{}) - compBefore, compAfter := NewAtomicCounter(0), NewAtomicCounter(0) - m := newMuxMetricsUpdater(abortChan, compBefore, compAfter) - log := zerolog.Nop() - - go func() { - errChan <- m.run(&log) - }() - - var wg sync.WaitGroup - wg.Add(2) - - // mock muxReader - readerStart := time.Now() - rm := &roundTripMeasurement{receiveTime: readerStart, sendTime: readerStart} - m.updateRTT(rm) - go func() { - defer wg.Done() - assert.Equal(t, 0, dataPoints%4, - "dataPoints is not divisible by 4; this test 
should be adjusted accordingly") - readerSend := readerStart.Add(time.Millisecond) - for i := 1; i <= dataPoints/4; i++ { - readerReceive := readerSend.Add(time.Duration(i) * time.Millisecond) - rm := &roundTripMeasurement{receiveTime: readerReceive, sendTime: readerSend} - m.updateRTT(rm) - readerSend = readerReceive.Add(time.Millisecond) - m.updateReceiveWindow(uint32(i)) - m.updateSendWindow(uint32(i)) - - m.updateInBoundBytes(uint64(i)) - } - }() - - // mock muxWriter - go func() { - defer wg.Done() - assert.Equal(t, 0, dataPoints%4, - "dataPoints is not divisible by 4; this test should be adjusted accordingly") - for j := dataPoints/4 + 1; j <= dataPoints/2; j++ { - m.updateReceiveWindow(uint32(j)) - m.updateSendWindow(uint32(j)) - - // should always be discarded since the send time is before readerSend - rm := &roundTripMeasurement{receiveTime: readerStart, sendTime: readerStart.Add(-time.Duration(j*dataPoints) * time.Millisecond)} - m.updateRTT(rm) - - m.updateOutBoundBytes(uint64(j)) - } - - }() - wg.Wait() - - metrics := m.metrics() - points := dataPoints / 2 - assert.Equal(t, time.Millisecond, metrics.RTTMin) - assert.Equal(t, time.Duration(dataPoints/4)*time.Millisecond, metrics.RTTMax) - - // sum(1..i) = i*(i+1)/2, ave(1..i) = i*(i+1)/2/i = (i+1)/2 - assert.Equal(t, float64(points+1)/float64(2), metrics.ReceiveWindowAve) - assert.Equal(t, uint32(1), metrics.ReceiveWindowMin) - assert.Equal(t, uint32(points), metrics.ReceiveWindowMax) - - assert.Equal(t, float64(points+1)/float64(2), metrics.SendWindowAve) - assert.Equal(t, uint32(1), metrics.SendWindowMin) - assert.Equal(t, uint32(points), metrics.SendWindowMax) - - assert.Equal(t, uint64(dataPoints/4), metrics.InBoundRateCurr) - assert.Equal(t, uint64(1), metrics.InBoundRateMin) - assert.Equal(t, uint64(dataPoints/4), metrics.InBoundRateMax) - - assert.Equal(t, uint64(dataPoints/2), metrics.OutBoundRateCurr) - assert.Equal(t, uint64(dataPoints/4+1), metrics.OutBoundRateMin) - assert.Equal(t, 
uint64(dataPoints/2), metrics.OutBoundRateMax) - - close(abortChan) - assert.Nil(t, <-errChan) - close(errChan) - -} diff --git a/h2mux/muxreader.go b/h2mux/muxreader.go deleted file mode 100644 index cf8d98f1..00000000 --- a/h2mux/muxreader.go +++ /dev/null @@ -1,508 +0,0 @@ -package h2mux - -import ( - "bytes" - "encoding/binary" - "fmt" - "io" - "net/url" - "time" - - "github.com/rs/zerolog" - "golang.org/x/net/http2" -) - -type MuxReader struct { - // f is used to read HTTP2 frames. - f *http2.Framer - // handler provides a callback to receive new streams. if nil, new streams cannot be accepted. - handler MuxedStreamHandler - // streams tracks currently-open streams. - streams *activeStreamMap - // readyList is used to signal writable streams. - readyList *ReadyList - // streamErrors lets us report stream errors to the MuxWriter. - streamErrors *StreamErrorMap - // goAwayChan is used to tell the writer to send a GOAWAY message. - goAwayChan chan<- http2.ErrCode - // abortChan is used when shutting down ungracefully. When this becomes readable, all activity should stop. - abortChan <-chan struct{} - // pingTimestamp is an atomic value containing the latest received ping timestamp. - pingTimestamp *PingTimestamp - // connActive is used to signal to the writer that something happened on the connection. - // This is used to clear idle timeout disconnection deadlines. - connActive Signal - // The initial value for the send and receive window of a new stream. - initialStreamWindow uint32 - // The max value for the send window of a stream. - streamWindowMax uint32 - // The max size for the write buffer of a stream - streamWriteBufferMaxLen int - // r is a reference to the underlying connection used when shutting down. 
- r io.Closer - // metricsUpdater is used to report metrics - metricsUpdater muxMetricsUpdater - // bytesRead is the amount of bytes read from data frames since the last time we called metricsUpdater.updateInBoundBytes() - bytesRead *AtomicCounter - // dictionaries holds the h2 cross-stream compression dictionaries - dictionaries h2Dictionaries -} - -// Shutdown blocks new streams from being created. -// It returns a channel that is closed once the last stream has closed. -func (r *MuxReader) Shutdown() <-chan struct{} { - done, alreadyInProgress := r.streams.Shutdown() - if alreadyInProgress { - return done - } - r.sendGoAway(http2.ErrCodeNo) - go func() { - // close reader side when last stream ends; this will cause the writer to abort - <-done - r.r.Close() - }() - return done -} - -func (r *MuxReader) run(log *zerolog.Logger) error { - defer log.Debug().Msg("mux - read: event loop finished") - - // routine to periodically update bytesRead - go func() { - ticker := time.NewTicker(updateFreq) - defer ticker.Stop() - for { - select { - case <-r.abortChan: - return - case <-ticker.C: - r.metricsUpdater.updateInBoundBytes(r.bytesRead.Count()) - } - } - }() - - for { - frame, err := r.f.ReadFrame() - if err != nil { - errorString := fmt.Sprintf("mux - read: %s", err) - if errorDetail := r.f.ErrorDetail(); errorDetail != nil { - errorString = fmt.Sprintf("%s: errorDetail: %s", errorString, errorDetail) - } - switch e := err.(type) { - case http2.StreamError: - log.Info().Msgf("%s: stream error", errorString) - // Ideally we wouldn't return here, since that aborts the muxer. - // We should communicate the error to the relevant MuxedStream - // data structure, so that callers of MuxedStream.Read() and - // MuxedStream.Write() would see it. Then we could `continue` - // and keep the muxer going. 
- return r.streamError(e.StreamID, e.Code) - case http2.ConnectionError: - log.Info().Msgf("%s: stream error", errorString) - return r.connectionError(err) - default: - if isConnectionClosedError(err) { - if r.streams.Len() == 0 { - // don't log the error here -- that would just be extra noise - log.Debug().Msg("mux - read: shutting down") - return nil - } - log.Info().Msgf("%s: connection closed unexpectedly", errorString) - return err - } else { - log.Info().Msgf("%s: frame read error", errorString) - return r.connectionError(err) - } - } - } - r.connActive.Signal() - log.Debug().Msgf("mux - read: read frame: data %v", frame) - switch f := frame.(type) { - case *http2.DataFrame: - err = r.receiveFrameData(f, log) - case *http2.MetaHeadersFrame: - err = r.receiveHeaderData(f) - case *http2.RSTStreamFrame: - streamID := f.Header().StreamID - if streamID == 0 { - return ErrInvalidStream - } - if stream, ok := r.streams.Get(streamID); ok { - stream.Close() - } - r.streams.Delete(streamID) - case *http2.PingFrame: - r.receivePingData(f) - case *http2.GoAwayFrame: - err = r.receiveGoAway(f) - // The receiver of a flow-controlled frame sends a WINDOW_UPDATE frame as it - // consumes data and frees up space in flow-control windows - case *http2.WindowUpdateFrame: - err = r.updateStreamWindow(f) - case *http2.UnknownFrame: - switch f.Header().Type { - case FrameUseDictionary: - err = r.receiveUseDictionary(f) - case FrameSetDictionary: - err = r.receiveSetDictionary(f) - default: - err = ErrUnexpectedFrameType - } - default: - err = ErrUnexpectedFrameType - } - if err != nil { - log.Debug().Msgf("mux - read: read error: data %v", frame) - return r.connectionError(err) - } - } -} - -func (r *MuxReader) newMuxedStream(streamID uint32) *MuxedStream { - return &MuxedStream{ - streamID: streamID, - readBuffer: NewSharedBuffer(), - writeBuffer: &bytes.Buffer{}, - writeBufferMaxLen: r.streamWriteBufferMaxLen, - writeBufferHasSpace: make(chan struct{}, 1), - receiveWindow: 
r.initialStreamWindow, - receiveWindowCurrentMax: r.initialStreamWindow, - receiveWindowMax: r.streamWindowMax, - sendWindow: r.initialStreamWindow, - readyList: r.readyList, - dictionaries: r.dictionaries, - } -} - -// getStreamForFrame returns a stream if valid, or an error describing why the stream could not be returned. -func (r *MuxReader) getStreamForFrame(frame http2.Frame) (*MuxedStream, error) { - sid := frame.Header().StreamID - if sid == 0 { - return nil, ErrUnexpectedFrameType - } - if stream, ok := r.streams.Get(sid); ok { - return stream, nil - } - if r.streams.IsLocalStreamID(sid) { - // no stream available, but no error - return nil, ErrClosedStream - } - if sid < r.streams.LastPeerStreamID() { - // no stream available, stream closed error - return nil, ErrClosedStream - } - return nil, ErrUnknownStream -} - -func (r *MuxReader) defaultStreamErrorHandler(err error, header http2.FrameHeader) error { - if header.Flags.Has(http2.FlagHeadersEndStream) { - return nil - } else if err == ErrUnknownStream || err == ErrClosedStream { - return r.streamError(header.StreamID, http2.ErrCodeStreamClosed) - } else { - return err - } -} - -// Receives header frames from a stream. A non-nil error is a connection error. -func (r *MuxReader) receiveHeaderData(frame *http2.MetaHeadersFrame) error { - var stream *MuxedStream - sid := frame.Header().StreamID - if sid == 0 { - return ErrUnexpectedFrameType - } - newStream := r.streams.IsPeerStreamID(sid) - if newStream { - // header request - // TODO support trailers (if stream exists) - ok, err := r.streams.AcquirePeerID(sid) - if !ok { - // ignore new streams while shutting down - return r.streamError(sid, err) - } - stream = r.newMuxedStream(sid) - // Set stream. Returns false if a stream already existed with that ID or we are shutting down, return false. 
- if !r.streams.Set(stream) { - // got HEADERS frame for an existing stream - // TODO support trailers - return r.streamError(sid, http2.ErrCodeInternal) - } - } else { - // header response - var err error - if stream, err = r.getStreamForFrame(frame); err != nil { - return r.defaultStreamErrorHandler(err, frame.Header()) - } - } - headers := make([]Header, 0, len(frame.Fields)) - for _, header := range frame.Fields { - switch header.Name { - case ":method": - stream.method = header.Value - case ":path": - u, err := url.Parse(header.Value) - if err == nil { - stream.path = u.Path - } - case "accept-encoding": - // remove accept-encoding if dictionaries are enabled - if r.dictionaries.write != nil { - continue - } - } - headers = append(headers, Header{Name: header.Name, Value: header.Value}) - } - stream.Headers = headers - if frame.Header().Flags.Has(http2.FlagHeadersEndStream) { - stream.receiveEOF() - return nil - } - if newStream { - go r.handleStream(stream) - } else { - close(stream.responseHeadersReceived) - } - return nil -} - -func (r *MuxReader) handleStream(stream *MuxedStream) { - defer stream.Close() - r.handler.ServeStream(stream) -} - -// Receives a data frame from a stream. A non-nil error is a connection error. 
-func (r *MuxReader) receiveFrameData(frame *http2.DataFrame, log *zerolog.Logger) error { - stream, err := r.getStreamForFrame(frame) - if err != nil { - return r.defaultStreamErrorHandler(err, frame.Header()) - } - data := frame.Data() - if len(data) > 0 { - n, err := stream.readBuffer.Write(data) - if err != nil { - return r.streamError(stream.streamID, http2.ErrCodeInternal) - } - r.bytesRead.IncrementBy(uint64(n)) - } - if frame.Header().Flags.Has(http2.FlagDataEndStream) { - if stream.receiveEOF() { - r.streams.Delete(stream.streamID) - log.Debug().Msgf("mux - read: stream closed: streamID: %d", frame.Header().StreamID) - } else { - log.Debug().Msgf("mux - read: shutdown receive side: streamID: %d", frame.Header().StreamID) - } - return nil - } - if !stream.consumeReceiveWindow(uint32(len(data))) { - return r.streamError(stream.streamID, http2.ErrCodeFlowControl) - } - r.metricsUpdater.updateReceiveWindow(stream.getReceiveWindow()) - return nil -} - -// Receive a PING from the peer. Update RTT and send/receive window metrics if it's an ACK. -func (r *MuxReader) receivePingData(frame *http2.PingFrame) { - ts := int64(binary.LittleEndian.Uint64(frame.Data[:])) - if !frame.IsAck() { - r.pingTimestamp.Set(ts) - return - } - - // Update the computed RTT aggregations with a new measurement. - // `ts` is the time that the probe was sent. - // We assume that `time.Now()` is the time we received that probe. - r.metricsUpdater.updateRTT(&roundTripMeasurement{ - receiveTime: time.Now(), - sendTime: time.Unix(0, ts), - }) -} - -// Receive a GOAWAY from the peer. Gracefully shut down our connection. -func (r *MuxReader) receiveGoAway(frame *http2.GoAwayFrame) error { - r.Shutdown() - // Close all streams above the last processed stream - lastStream := r.streams.LastLocalStreamID() - for i := frame.LastStreamID + 2; i <= lastStream; i++ { - if stream, ok := r.streams.Get(i); ok { - stream.Close() - } - } - return nil -} - -// Receive a USE_DICTIONARY from the peer. 
Setup dictionary for stream. -func (r *MuxReader) receiveUseDictionary(frame *http2.UnknownFrame) error { - payload := frame.Payload() - streamID := frame.StreamID - - // Check frame is formatted properly - if len(payload) != 1 { - return r.streamError(streamID, http2.ErrCodeProtocol) - } - - stream, err := r.getStreamForFrame(frame) - if err != nil { - return err - } - - if stream.receivedUseDict == true || stream.dictionaries.read == nil { - return r.streamError(streamID, http2.ErrCodeInternal) - } - - stream.receivedUseDict = true - dictID := payload[0] - - dictReader := stream.dictionaries.read.newReader(stream.readBuffer.(*SharedBuffer), dictID) - if dictReader == nil { - return r.streamError(streamID, http2.ErrCodeInternal) - } - - stream.readBufferLock.Lock() - stream.readBuffer = dictReader - stream.readBufferLock.Unlock() - - return nil -} - -// Receive a SET_DICTIONARY from the peer. Update dictionaries accordingly. -func (r *MuxReader) receiveSetDictionary(frame *http2.UnknownFrame) (err error) { - - payload := frame.Payload() - flags := frame.Flags - - stream, err := r.getStreamForFrame(frame) - if err != nil && err != ErrClosedStream { - return err - } - reader, ok := stream.readBuffer.(*h2DictionaryReader) - if !ok { - return r.streamError(frame.StreamID, http2.ErrCodeProtocol) - } - - // A SetDictionary frame consists of several - // Dictionary-Entries that specify how existing dictionaries - // are to be updated using the current stream data - // +---------------+---------------+ - // | Dictionary-Entry (+) ... - // +---------------+---------------+ - - for { - // Each Dictionary-Entry is formatted as follows: - // +-------------------------------+ - // | Dictionary-ID (8) | - // +---+---------------------------+ - // | P | Size (7+) | - // +---+---------------------------+ - // | E?| D?| Truncate? (6+) | - // +---+---------------------------+ - // | Offset? 
(8+) | - // +-------------------------------+ - - var size, truncate, offset uint64 - var p, e, d bool - - // Parse a single Dictionary-Entry - if len(payload) < 2 { // Must have at least id and size - return MuxerStreamError{"unexpected EOF", http2.ErrCodeProtocol} - } - - dictID := uint8(payload[0]) - p = (uint8(payload[1]) >> 7) == 1 - payload, size, err = http2ReadVarInt(7, payload[1:]) - if err != nil { - return - } - - if flags.Has(FlagSetDictionaryAppend) { - // Presence of FlagSetDictionaryAppend means we expect e, d and truncate - if len(payload) < 1 { - return MuxerStreamError{"unexpected EOF", http2.ErrCodeProtocol} - } - e = (uint8(payload[0]) >> 7) == 1 - d = (uint8((payload[0])>>6) & 1) == 1 - payload, truncate, err = http2ReadVarInt(6, payload) - if err != nil { - return - } - } - - if flags.Has(FlagSetDictionaryOffset) { - // Presence of FlagSetDictionaryOffset means we expect offset - if len(payload) < 1 { - return MuxerStreamError{"unexpected EOF", http2.ErrCodeProtocol} - } - payload, offset, err = http2ReadVarInt(8, payload) - if err != nil { - return - } - } - - setdict := setDictRequest{streamID: stream.streamID, - dictID: dictID, - dictSZ: size, - truncate: truncate, - offset: offset, - P: p, - E: e, - D: d} - - // Find the right dictionary - dict, err := r.dictionaries.read.getDictByID(dictID) - if err != nil { - return err - } - - // Register a dictionary update order for the dictionary and reader - updateEntry := &dictUpdate{reader: reader, dictionary: dict, s: setdict} - dict.queue = append(dict.queue, updateEntry) - reader.queue = append(reader.queue, updateEntry) - // End of frame - if len(payload) == 0 { - break - } - } - return nil -} - -// Receives header frames from a stream. A non-nil error is a connection error. 
-func (r *MuxReader) updateStreamWindow(frame *http2.WindowUpdateFrame) error { - stream, err := r.getStreamForFrame(frame) - if err != nil && err != ErrUnknownStream && err != ErrClosedStream { - return err - } - if stream == nil { - // ignore window updates on closed streams - return nil - } - stream.replenishSendWindow(frame.Increment) - r.metricsUpdater.updateSendWindow(stream.getSendWindow()) - return nil -} - -// Raise a stream processing error, closing the stream. Runs on the write thread. -func (r *MuxReader) streamError(streamID uint32, e http2.ErrCode) error { - r.streamErrors.RaiseError(streamID, e) - return nil -} - -func (r *MuxReader) connectionError(err error) error { - http2Code := http2.ErrCodeInternal - switch e := err.(type) { - case http2.ConnectionError: - http2Code = http2.ErrCode(e) - case MuxerProtocolError: - http2Code = e.h2code - } - r.sendGoAway(http2Code) - return err -} - -// Instruct the writer to send a GOAWAY message if possible. This may fail in -// the case where an existing GOAWAY message is in flight or the writer event -// loop already ended. 
-func (r *MuxReader) sendGoAway(errCode http2.ErrCode) { - select { - case r.goAwayChan <- errCode: - default: - } -} diff --git a/h2mux/muxreader_test.go b/h2mux/muxreader_test.go deleted file mode 100644 index 10ae7ff8..00000000 --- a/h2mux/muxreader_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package h2mux - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -var ( - methodHeader = Header{ - Name: ":method", - Value: "GET", - } - schemeHeader = Header{ - Name: ":scheme", - Value: "https", - } - pathHeader = Header{ - Name: ":path", - Value: "/api/tunnels", - } - respStatusHeader = Header{ - Name: ":status", - Value: "200", - } -) - -type mockOriginStreamHandler struct { - stream *MuxedStream -} - -func (mosh *mockOriginStreamHandler) ServeStream(stream *MuxedStream) error { - mosh.stream = stream - // Echo tunnel hostname in header - stream.WriteHeaders([]Header{respStatusHeader}) - return nil -} - -func assertOpenStreamSucceed(t *testing.T, stream *MuxedStream, err error) { - assert.NoError(t, err) - assert.Len(t, stream.Headers, 1) - assert.Equal(t, respStatusHeader, stream.Headers[0]) -} - -func TestMissingHeaders(t *testing.T) { - originHandler := &mockOriginStreamHandler{} - muxPair := NewDefaultMuxerPair(t, t.Name(), originHandler.ServeStream) - muxPair.Serve(t) - - ctx, cancel := context.WithTimeout(context.Background(), time.Second) - defer cancel() - - reqHeaders := []Header{ - { - Name: "content-type", - Value: "application/json", - }, - } - - stream, err := muxPair.EdgeMux.OpenStream(ctx, reqHeaders, nil) - assertOpenStreamSucceed(t, stream, err) - - assert.Empty(t, originHandler.stream.method) - assert.Empty(t, originHandler.stream.path) -} - -func TestReceiveHeaderData(t *testing.T) { - originHandler := &mockOriginStreamHandler{} - muxPair := NewDefaultMuxerPair(t, t.Name(), originHandler.ServeStream) - muxPair.Serve(t) - - reqHeaders := []Header{ - methodHeader, - schemeHeader, - pathHeader, - } - - ctx, cancel := 
context.WithTimeout(context.Background(), time.Second) - defer cancel() - - stream, err := muxPair.EdgeMux.OpenStream(ctx, reqHeaders, nil) - assertOpenStreamSucceed(t, stream, err) - - assert.Equal(t, methodHeader.Value, originHandler.stream.method) - assert.Equal(t, pathHeader.Value, originHandler.stream.path) -} diff --git a/h2mux/muxwriter.go b/h2mux/muxwriter.go deleted file mode 100644 index c4d8fded..00000000 --- a/h2mux/muxwriter.go +++ /dev/null @@ -1,311 +0,0 @@ -package h2mux - -import ( - "bytes" - "encoding/binary" - "io" - "time" - - "github.com/rs/zerolog" - - "golang.org/x/net/http2" - "golang.org/x/net/http2/hpack" -) - -type MuxWriter struct { - // f is used to write HTTP2 frames. - f *http2.Framer - // streams tracks currently-open streams. - streams *activeStreamMap - // streamErrors receives stream errors raised by the MuxReader. - streamErrors *StreamErrorMap - // readyStreamChan is used to multiplex writable streams onto the single connection. - // When a stream becomes writable its ID is sent on this channel. - readyStreamChan <-chan uint32 - // newStreamChan is used to create new streams with a given set of headers. - newStreamChan <-chan MuxedStreamRequest - // goAwayChan is used to send a single GOAWAY message to the peer. The element received - // is the HTTP/2 error code to send. - goAwayChan <-chan http2.ErrCode - // abortChan is used when shutting down ungracefully. When this becomes readable, all activity should stop. - abortChan <-chan struct{} - // pingTimestamp is an atomic value containing the latest received ping timestamp. - pingTimestamp *PingTimestamp - // A timer used to measure idle connection time. Reset after sending data. - idleTimer *IdleTimer - // connActiveChan receives a signal that the connection received some (read) activity. - connActiveChan <-chan struct{} - // Maximum size of all frames that can be sent on this connection. 
- maxFrameSize uint32 - // headerEncoder is the stateful header encoder for this connection - headerEncoder *hpack.Encoder - // headerBuffer is the temporary buffer used by headerEncoder. - headerBuffer bytes.Buffer - - // metricsUpdater is used to report metrics - metricsUpdater muxMetricsUpdater - // bytesWrote is the amount of bytes written to data frames since the last time we called metricsUpdater.updateOutBoundBytes() - bytesWrote *AtomicCounter - - useDictChan <-chan useDictRequest -} - -type MuxedStreamRequest struct { - stream *MuxedStream - body io.Reader -} - -func NewMuxedStreamRequest(stream *MuxedStream, body io.Reader) MuxedStreamRequest { - return MuxedStreamRequest{ - stream: stream, - body: body, - } -} - -func (r *MuxedStreamRequest) flushBody() { - io.Copy(r.stream, r.body) - r.stream.CloseWrite() -} - -func tsToPingData(ts int64) [8]byte { - pingData := [8]byte{} - binary.LittleEndian.PutUint64(pingData[:], uint64(ts)) - return pingData -} - -func (w *MuxWriter) run(log *zerolog.Logger) error { - defer log.Debug().Msg("mux - write: event loop finished") - - // routine to periodically communicate bytesWrote - go func() { - ticker := time.NewTicker(updateFreq) - defer ticker.Stop() - for { - select { - case <-w.abortChan: - return - case <-ticker.C: - w.metricsUpdater.updateOutBoundBytes(w.bytesWrote.Count()) - } - } - }() - - for { - select { - case <-w.abortChan: - log.Debug().Msg("mux - write: aborting writer thread") - return nil - case errCode := <-w.goAwayChan: - log.Debug().Msgf("mux - write: sending GOAWAY code %v", errCode) - err := w.f.WriteGoAway(w.streams.LastPeerStreamID(), errCode, []byte{}) - if err != nil { - return err - } - w.idleTimer.MarkActive() - case <-w.pingTimestamp.GetUpdateChan(): - log.Debug().Msg("mux - write: sending PING ACK") - err := w.f.WritePing(true, tsToPingData(w.pingTimestamp.Get())) - if err != nil { - return err - } - w.idleTimer.MarkActive() - case <-w.idleTimer.C: - if !w.idleTimer.Retry() { - return 
ErrConnectionDropped - } - log.Debug().Msg("mux - write: sending PING") - err := w.f.WritePing(false, tsToPingData(time.Now().UnixNano())) - if err != nil { - return err - } - w.idleTimer.ResetTimer() - case <-w.connActiveChan: - w.idleTimer.MarkActive() - case <-w.streamErrors.GetSignalChan(): - for streamID, errCode := range w.streamErrors.GetErrors() { - log.Debug().Msgf("mux - write: resetting stream with code: %v streamID: %d", errCode, streamID) - err := w.f.WriteRSTStream(streamID, errCode) - if err != nil { - return err - } - } - w.idleTimer.MarkActive() - case streamRequest := <-w.newStreamChan: - streamID := w.streams.AcquireLocalID() - streamRequest.stream.streamID = streamID - if !w.streams.Set(streamRequest.stream) { - // Race between OpenStream and Shutdown, and Shutdown won. Let Shutdown (and the eventual abort) take - // care of this stream. Ideally we'd pass the error directly to the stream object somehow so the - // caller can be unblocked sooner, but the value of that optimisation is minimal for most of the - // reasons why you'd call Shutdown anyway. 
- continue - } - if streamRequest.body != nil { - go streamRequest.flushBody() - } - err := w.writeStreamData(streamRequest.stream, log) - if err != nil { - return err - } - w.idleTimer.MarkActive() - case streamID := <-w.readyStreamChan: - stream, ok := w.streams.Get(streamID) - if !ok { - continue - } - err := w.writeStreamData(stream, log) - if err != nil { - return err - } - w.idleTimer.MarkActive() - case useDict := <-w.useDictChan: - err := w.writeUseDictionary(useDict) - if err != nil { - log.Error().Msgf("mux - write: error writing use dictionary: %s", err) - return err - } - w.idleTimer.MarkActive() - } - } -} - -func (w *MuxWriter) writeStreamData(stream *MuxedStream, log *zerolog.Logger) error { - log.Debug().Msgf("mux - write: writable: streamID: %d", stream.streamID) - chunk := stream.getChunk() - w.metricsUpdater.updateReceiveWindow(stream.getReceiveWindow()) - w.metricsUpdater.updateSendWindow(stream.getSendWindow()) - if chunk.sendHeadersFrame() { - err := w.writeHeaders(chunk.streamID, chunk.headers) - if err != nil { - log.Error().Msgf("mux - write: error writing headers: %s: streamID: %d", err, stream.streamID) - return err - } - log.Debug().Msgf("mux - write: output headers: streamID: %d", stream.streamID) - } - - if chunk.sendWindowUpdateFrame() { - // Send a WINDOW_UPDATE frame to update our receive window. 
- // If the Stream ID is zero, the window update applies to the connection as a whole - // RFC7540 section-6.9.1 "A receiver that receives a flow-controlled frame MUST - // always account for its contribution against the connection flow-control - // window, unless the receiver treats this as a connection error" - err := w.f.WriteWindowUpdate(chunk.streamID, chunk.windowUpdate) - if err != nil { - log.Error().Msgf("mux - write: error writing window update: %s: streamID: %d", err, stream.streamID) - return err - } - log.Debug().Msgf("mux - write: increment receive window by %d streamID: %d", chunk.windowUpdate, stream.streamID) - } - - for chunk.sendDataFrame() { - payload, sentEOF := chunk.nextDataFrame(int(w.maxFrameSize)) - err := w.f.WriteData(chunk.streamID, sentEOF, payload) - if err != nil { - log.Error().Msgf("mux - write: error writing data: %s: streamID: %d", err, stream.streamID) - return err - } - // update the amount of data wrote - w.bytesWrote.IncrementBy(uint64(len(payload))) - log.Debug().Msgf("mux - write: output data: %d: streamID: %d", len(payload), stream.streamID) - - if sentEOF { - if stream.readBuffer.Closed() { - // transition into closed state - if !stream.gotReceiveEOF() { - // the peer may send data that we no longer want to receive. Force them into the - // closed state. 
- log.Debug().Msgf("mux - write: resetting stream: streamID: %d", stream.streamID) - w.f.WriteRSTStream(chunk.streamID, http2.ErrCodeNo) - } else { - // Half-open stream transitioned into closed - log.Debug().Msgf("mux - write: closing stream: streamID: %d", stream.streamID) - } - w.streams.Delete(chunk.streamID) - } else { - log.Debug().Msgf("mux - write: closing stream write side: streamID: %d", stream.streamID) - } - } - } - return nil -} - -func (w *MuxWriter) encodeHeaders(headers []Header) ([]byte, error) { - w.headerBuffer.Reset() - for _, header := range headers { - err := w.headerEncoder.WriteField(hpack.HeaderField{ - Name: header.Name, - Value: header.Value, - }) - if err != nil { - return nil, err - } - } - return w.headerBuffer.Bytes(), nil -} - -// writeHeaders writes a block of encoded headers, splitting it into multiple frames if necessary. -func (w *MuxWriter) writeHeaders(streamID uint32, headers []Header) error { - encodedHeaders, err := w.encodeHeaders(headers) - if err != nil || len(encodedHeaders) == 0 { - return err - } - - blockSize := int(w.maxFrameSize) - // CONTINUATION is unnecessary; the headers fit within the blockSize - if len(encodedHeaders) < blockSize { - return w.f.WriteHeaders(http2.HeadersFrameParam{ - StreamID: streamID, - EndHeaders: true, - BlockFragment: encodedHeaders, - }) - } - - choppedHeaders := chopEncodedHeaders(encodedHeaders, blockSize) - // len(choppedHeaders) is at least 2 - if err := w.f.WriteHeaders(http2.HeadersFrameParam{StreamID: streamID, EndHeaders: false, BlockFragment: choppedHeaders[0]}); err != nil { - return err - } - for i := 1; i < len(choppedHeaders)-1; i++ { - if err := w.f.WriteContinuation(streamID, false, choppedHeaders[i]); err != nil { - return err - } - } - if err := w.f.WriteContinuation(streamID, true, choppedHeaders[len(choppedHeaders)-1]); err != nil { - return err - } - - return nil -} - -// Partition a slice of bytes into `len(slice) / blockSize` slices of length `blockSize` -func 
chopEncodedHeaders(headers []byte, chunkSize int) [][]byte { - var divided [][]byte - - for i := 0; i < len(headers); i += chunkSize { - end := i + chunkSize - - if end > len(headers) { - end = len(headers) - } - - divided = append(divided, headers[i:end]) - } - - return divided -} - -func (w *MuxWriter) writeUseDictionary(dictRequest useDictRequest) error { - err := w.f.WriteRawFrame(FrameUseDictionary, 0, dictRequest.streamID, []byte{byte(dictRequest.dictID)}) - if err != nil { - return err - } - payload := make([]byte, 0, 64) - for _, set := range dictRequest.setDict { - payload = append(payload, byte(set.dictID)) - payload = appendVarInt(payload, 7, uint64(set.dictSZ)) - payload = append(payload, 0x80) // E = 1, D = 0, Truncate = 0 - } - - err = w.f.WriteRawFrame(FrameSetDictionary, FlagSetDictionaryAppend, dictRequest.streamID, payload) - return err -} diff --git a/h2mux/muxwriter_test.go b/h2mux/muxwriter_test.go deleted file mode 100644 index 07e23bdc..00000000 --- a/h2mux/muxwriter_test.go +++ /dev/null @@ -1,26 +0,0 @@ -package h2mux - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestChopEncodedHeaders(t *testing.T) { - mockEncodedHeaders := make([]byte, 5) - for i := range mockEncodedHeaders { - mockEncodedHeaders[i] = byte(i) - } - chopped := chopEncodedHeaders(mockEncodedHeaders, 4) - - assert.Equal(t, 2, len(chopped)) - assert.Equal(t, []byte{0, 1, 2, 3}, chopped[0]) - assert.Equal(t, []byte{4}, chopped[1]) -} - -func TestChopEncodedEmptyHeaders(t *testing.T) { - mockEncodedHeaders := make([]byte, 0) - chopped := chopEncodedHeaders(mockEncodedHeaders, 3) - - assert.Equal(t, 0, len(chopped)) -} diff --git a/h2mux/readylist.go b/h2mux/readylist.go deleted file mode 100644 index d1a18c6d..00000000 --- a/h2mux/readylist.go +++ /dev/null @@ -1,151 +0,0 @@ -package h2mux - -import "sync" - -// ReadyList multiplexes several event signals onto a single channel. 
-type ReadyList struct { - // signalC is used to signal that a stream can be enqueued - signalC chan uint32 - // waitC is used to signal the ID of the first ready descriptor - waitC chan uint32 - // doneC is used to signal that run should terminate - doneC chan struct{} - closeOnce sync.Once -} - -func NewReadyList() *ReadyList { - rl := &ReadyList{ - signalC: make(chan uint32), - waitC: make(chan uint32), - doneC: make(chan struct{}), - } - go rl.run() - return rl -} - -// ID is the stream ID -func (r *ReadyList) Signal(ID uint32) { - select { - case r.signalC <- ID: - // ReadyList already closed - case <-r.doneC: - } -} - -func (r *ReadyList) ReadyChannel() <-chan uint32 { - return r.waitC -} - -func (r *ReadyList) Close() { - r.closeOnce.Do(func() { - close(r.doneC) - }) -} - -func (r *ReadyList) run() { - defer close(r.waitC) - var queue readyDescriptorQueue - var firstReady *readyDescriptor - activeDescriptors := newReadyDescriptorMap() - for { - if firstReady == nil { - select { - case i := <-r.signalC: - firstReady = activeDescriptors.SetIfMissing(i) - case <-r.doneC: - return - } - } - select { - case r.waitC <- firstReady.ID: - activeDescriptors.Delete(firstReady.ID) - firstReady = queue.Dequeue() - case i := <-r.signalC: - newReady := activeDescriptors.SetIfMissing(i) - if newReady != nil { - // key doesn't exist - queue.Enqueue(newReady) - } - case <-r.doneC: - return - } - } -} - -type readyDescriptor struct { - ID uint32 - Next *readyDescriptor -} - -// readyDescriptorQueue is a queue of readyDescriptors in the form of a singly-linked list. -// The nil readyDescriptorQueue is an empty queue ready for use. 
-type readyDescriptorQueue struct { - Head *readyDescriptor - Tail *readyDescriptor -} - -func (q *readyDescriptorQueue) Empty() bool { - return q.Head == nil -} - -func (q *readyDescriptorQueue) Enqueue(x *readyDescriptor) { - if x.Next != nil { - panic("enqueued already queued item") - } - if q.Empty() { - q.Head = x - q.Tail = x - } else { - q.Tail.Next = x - q.Tail = x - } -} - -// Dequeue returns the first readyDescriptor in the queue, or nil if empty. -func (q *readyDescriptorQueue) Dequeue() *readyDescriptor { - if q.Empty() { - return nil - } - x := q.Head - q.Head = x.Next - x.Next = nil - return x -} - -// readyDescriptorQueue is a map of readyDescriptors keyed by ID. -// It maintains a free list of deleted ready descriptors. -type readyDescriptorMap struct { - descriptors map[uint32]*readyDescriptor - free []*readyDescriptor -} - -func newReadyDescriptorMap() *readyDescriptorMap { - return &readyDescriptorMap{descriptors: make(map[uint32]*readyDescriptor)} -} - -// create or reuse a readyDescriptor if the stream is not in the queue. 
-// This avoid stream starvation caused by a single high-bandwidth stream monopolising the writer goroutine -func (m *readyDescriptorMap) SetIfMissing(key uint32) *readyDescriptor { - if _, ok := m.descriptors[key]; ok { - return nil - } - - var newDescriptor *readyDescriptor - if len(m.free) > 0 { - // reuse deleted ready descriptors - newDescriptor = m.free[len(m.free)-1] - m.free = m.free[:len(m.free)-1] - } else { - newDescriptor = &readyDescriptor{} - } - newDescriptor.ID = key - m.descriptors[key] = newDescriptor - return newDescriptor -} - -func (m *readyDescriptorMap) Delete(key uint32) { - if descriptor, ok := m.descriptors[key]; ok { - m.free = append(m.free, descriptor) - delete(m.descriptors, key) - } -} diff --git a/h2mux/readylist_test.go b/h2mux/readylist_test.go deleted file mode 100644 index 6ee9cfbf..00000000 --- a/h2mux/readylist_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package h2mux - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func assertEmpty(t *testing.T, rl *ReadyList) { - select { - case <-rl.ReadyChannel(): - t.Fatal("Spurious wakeup") - default: - } -} - -func assertClosed(t *testing.T, rl *ReadyList) { - select { - case _, ok := <-rl.ReadyChannel(): - assert.False(t, ok, "ReadyChannel was not closed") - case <-time.After(100 * time.Millisecond): - t.Fatalf("Timeout") - } -} - -func receiveWithTimeout(t *testing.T, rl *ReadyList) uint32 { - select { - case i := <-rl.ReadyChannel(): - return i - case <-time.After(100 * time.Millisecond): - t.Fatalf("Timeout") - return 0 - } -} - -func TestReadyListEmpty(t *testing.T) { - rl := NewReadyList() - - // no signals, receive should fail - assertEmpty(t, rl) -} -func TestReadyListSignal(t *testing.T) { - rl := NewReadyList() - assertEmpty(t, rl) - - rl.Signal(0) - if receiveWithTimeout(t, rl) != 0 { - t.Fatalf("Received wrong ID of signalled event") - } - - assertEmpty(t, rl) -} - -func TestReadyListMultipleSignals(t *testing.T) { - rl := NewReadyList() - 
assertEmpty(t, rl) - - // Signals should not block; - // Duplicate unhandled signals should not cause multiple wakeups - signalled := [5]bool{} - for i := range signalled { - rl.Signal(uint32(i)) - rl.Signal(uint32(i)) - } - // All signals should be received once (in any order) - for range signalled { - i := receiveWithTimeout(t, rl) - if signalled[i] { - t.Fatalf("Received signal %d more than once", i) - } - signalled[i] = true - } - for i := range signalled { - if !signalled[i] { - t.Fatalf("Never received signal %d", i) - } - } - assertEmpty(t, rl) -} - -func TestReadyListClose(t *testing.T) { - rl := NewReadyList() - rl.Close() - - // readyList.run() occurs in a separate goroutine, - // so there's no way to directly check that run() has terminated. - // Perform an indirect check: is the ready channel closed? - assertClosed(t, rl) - - // a second rl.Close() shouldn't cause a panic - rl.Close() - - // Signal shouldn't block after Close() - done := make(chan struct{}) - go func() { - for i := 0; i < 5; i++ { - rl.Signal(uint32(i)) - } - close(done) - }() - select { - case <-done: - case <-time.After(100 * time.Millisecond): - t.Fatal("Test timed out") - } -} - -func TestReadyDescriptorQueue(t *testing.T) { - var queue readyDescriptorQueue - items := [4]readyDescriptor{} - for i := range items { - items[i].ID = uint32(i) - } - - if !queue.Empty() { - t.Fatalf("nil queue should be empty") - } - queue.Enqueue(&items[3]) - queue.Enqueue(&items[1]) - queue.Enqueue(&items[0]) - queue.Enqueue(&items[2]) - if queue.Empty() { - t.Fatalf("Empty should be false after enqueue") - } - i := queue.Dequeue().ID - if i != 3 { - t.Fatalf("item 3 should have been dequeued, got %d instead", i) - } - i = queue.Dequeue().ID - if i != 1 { - t.Fatalf("item 1 should have been dequeued, got %d instead", i) - } - i = queue.Dequeue().ID - if i != 0 { - t.Fatalf("item 0 should have been dequeued, got %d instead", i) - } - i = queue.Dequeue().ID - if i != 2 { - t.Fatalf("item 2 should have 
been dequeued, got %d instead", i) - } - if !queue.Empty() { - t.Fatal("queue should be empty after dequeuing all items") - } - if queue.Dequeue() != nil { - t.Fatal("dequeue on empty queue should return nil") - } -} - -func TestReadyDescriptorMap(t *testing.T) { - m := newReadyDescriptorMap() - m.Delete(42) - // (delete of missing key should be a noop) - x := m.SetIfMissing(42) - if x == nil { - t.Fatal("SetIfMissing for new key returned nil") - } - if m.SetIfMissing(42) != nil { - t.Fatal("SetIfMissing for existing key returned non-nil") - } - // this delete has effect - m.Delete(42) - // the next set should reuse the old object - y := m.SetIfMissing(666) - if y == nil { - t.Fatal("SetIfMissing for new key returned nil") - } - if x != y { - t.Fatal("SetIfMissing didn't reuse freed object") - } -} diff --git a/h2mux/rtt.go b/h2mux/rtt.go deleted file mode 100644 index 350233e3..00000000 --- a/h2mux/rtt.go +++ /dev/null @@ -1,29 +0,0 @@ -package h2mux - -import ( - "sync/atomic" -) - -// PingTimestamp is an atomic interface around ping timestamping and signalling. 
-type PingTimestamp struct { - ts int64 - signal Signal -} - -func NewPingTimestamp() *PingTimestamp { - return &PingTimestamp{signal: NewSignal()} -} - -func (pt *PingTimestamp) Set(v int64) { - if atomic.SwapInt64(&pt.ts, v) != 0 { - pt.signal.Signal() - } -} - -func (pt *PingTimestamp) Get() int64 { - return atomic.SwapInt64(&pt.ts, 0) -} - -func (pt *PingTimestamp) GetUpdateChan() <-chan struct{} { - return pt.signal.WaitChannel() -} diff --git a/h2mux/sample/ghost-url.min.js b/h2mux/sample/ghost-url.min.js deleted file mode 100644 index eb4ecd50..00000000 --- a/h2mux/sample/ghost-url.min.js +++ /dev/null @@ -1 +0,0 @@ -!function(){"use strict";function a(a){var b,c=[];if(!a)return"";for(b in a)a.hasOwnProperty(b)&&(a[b]||a[b]===!1)&&c.push(b+"="+encodeURIComponent(a[b]));return c.length?"?"+c.join("&"):""}var b,c,d,e,f="https://cloudflare.ghost.io/ghost/api/v0.1/";d={api:function(){var d,e=Array.prototype.slice.call(arguments),g=f;return d=e.pop(),d&&"object"!=typeof d&&(e.push(d),d={}),d=d||{},d.client_id=b,d.client_secret=c,e.length&&e.forEach(function(a){g+=a.replace(/^\/|\/$/g,"")+"/"}),g+a(d)}},e=function(a){b=a.clientId?a.clientId:"",c=a.clientSecret?a.clientSecret:"",f=a.url?a.url:f.match(/{\{api-url}}/)?"":f},"undefined"!=typeof window&&(window.ghost=window.ghost||{},window.ghost.url=d,window.ghost.init=e),"undefined"!=typeof module&&(module.exports={url:d,init:e})}(); \ No newline at end of file diff --git a/h2mux/sample/index.html b/h2mux/sample/index.html deleted file mode 100644 index fe91d668..00000000 --- a/h2mux/sample/index.html +++ /dev/null @@ -1,537 +0,0 @@ - - - - - - - Cloudflare Blog - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - -
-
- - - -
-
-

Living In A Multi-Cloud World

-
- Published on - by Sergi Isasi. -
-
-
-

A few months ago at Cloudflare’s Internet Summit, we hosted a discussion on A Cloud Without Handcuffs with Joe Beda, one of the creators of Kubernetes, and Brandon Phillips, the co-founder of CoreOS. The conversation touched on multiple areas, but it’s clear that more and more companies are recognizing the need to have some strategy around hosting their applications on multiple cloud providers. Earlier this year,…

-
- -
- - - -
-
-

The Supreme Court Wanders into the Patent Troll Fight

-
- Published on - by Edo Royker. -
-
-
-

Next Monday, the US Supreme Court will hear oral arguments in Oil States Energy Services, LLC vs. Greene’s Energy Group, LLC, which is a case to determine whether the Inter Partes Review (IPR) administrative process at the US Patent and Trademark Office (USPTO) used to determine the validity of patents is constitutional. The constitutionality of the IPR process is one of the biggest legal issues facing innovative…

-
- -
- - - -
-
-

7 Cloudflare Apps Which Increase User Engagement on Your Site

-
- Published on - by Andrew Fitch. -
-
-
-

Cloudflare Apps now lists 95 apps from apps which grow email lists to apps which acquire new customers to apps which help site owners make more money. The great thing about these apps is that users don't have to have any coding or development skills. They can just sign up for the app and start using it on their sites. Let’s take a moment to highlight some…

-
- -
- - - -
-
-

The Super Secret Cloudflare Master Plan, or why we acquired Neumob

-
- Published on - by John Graham-Cumming. -
-
-
-

We announced today that Cloudflare has acquired Neumob. Neumob’s team built exceptional technology to speed up mobile apps, reduce errors on challenging mobile networks, and increase conversions. Cloudflare will integrate the Neumob technology with our global network to give Neumob truly global reach. It’s tempting to think of the Neumob acquisition as a point product added to the Cloudflare portfolio. But it actually represents a key…

-
- -
- - - -
-
-

Thwarting the Tactics of the Equifax Attackers

-
- Published on - by Alex Cruz Farmer. -
-
-
-

We are now 3 months on from one of the biggest, most significant data breaches in history, but has it redefined people's awareness on security? The answer to that is absolutely yes, awareness is at an all-time high. Awareness, however, does not always result in positive action. The fallacy which is often assumed is "surely, if I keep my software up to date with all the patches, that's…

-
- -
- - - -
-
-

Go, don't collect my garbage

-
- Published on - by Vlad Krasnov. -
-
-
-

Not long ago I needed to benchmark the performance of Golang on a many-core machine. I took several of the benchmarks that are bundled with the Go source code, copied them, and modified them to run on all available threads. In that case the machine has 24 cores and 48 threads. CC BY-SA 2.0 image by sponki25 I started with ECDSA P256 Sign, probably because I have…

-
- -
- - - -
-
-

Cloudflare Wants to Buy Your Meetup Group Pizza

-
- Published on - by Andrew Fitch. -
-
-
-

If you’re a web dev / devops / etc. meetup group that also works toward building a faster, safer Internet, I want to support your awesome group by buying you pizza. If your group’s focus falls within one of the subject categories below and you’re willing to give us a 30 second shout out and tweet a photo of your group and @Cloudflare, your meetup’s pizza…

-
- -
- - - -
-
-

On the dangers of Intel's frequency scaling

-
- Published on - by Vlad Krasnov. -
-
-
-

While I was writing the post comparing the new Qualcomm server chip, Centriq, to our current stock of Intel Skylake-based Xeons, I noticed a disturbing phenomena. When benchmarking OpenSSL 1.1.1dev, I discovered that the performance of the cipher ChaCha20-Poly1305 does not scale very well. On a single thread, it performed at the speed of approximately 2.89GB/s, whereas on 24 cores, and 48 threads it…

-
- -
- - - - - -
- - -
- - - - - - - - - - - - diff --git a/h2mux/sample/index1.html b/h2mux/sample/index1.html deleted file mode 100644 index 7607f3e9..00000000 --- a/h2mux/sample/index1.html +++ /dev/null @@ -1,515 +0,0 @@ - - - - - - - Living In A Multi-Cloud World - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - -
-
- -
- - -
-

Living In A Multi-Cloud World

-
- - by Sergi Isasi. -
- -
- -
-

A few months ago at Cloudflare’s Internet Summit, we hosted a discussion on A Cloud Without Handcuffs with Joe Beda, one of the creators of Kubernetes, and Brandon Phillips, the co-founder of CoreOS. The conversation touched on multiple areas, but it’s clear that more and more companies are recognizing the need to have some strategy around hosting their applications on multiple cloud providers.

- -

Earlier this year, Mary Meeker published her annual Internet Trends report which revealed that 22% of respondents viewed Cloud Vendor Lock-In as a top 3 concern, up from just 7% in 2012. This is in contrast to previous top concerns, Data Security and Cost & Savings, both of which dropped amongst those surveyed.

- -

Internet Trends

- -

At Cloudflare, our mission is to help build a better internet. To fulfill this mission, our customers need to have consistent access to the best technology and services, over time. This is especially the case with respect to storage and compute providers. This means not becoming locked-in to any single provider and taking advantage of multiple cloud computing vendors (such as Amazon Web Services or Google Cloud Platform) for the same end user services.

- -

The Benefits of Having Multiple Cloud Vendors

- -

There are a number of potential challenges when selecting a single cloud provider. Though there may be scenarios where it makes sense to consolidate on a single vendor, our belief is that it is important that customers are aware of their choice and downsides of being potentially locked-in to that particular vendor. In short, know what trade offs you are making should you decide to continue to consolidate parts of your network, compute, and storage with a single cloud provider. While not comprehensive, here are a few trade-offs you may be making if you are locked-in to one cloud.

- -

Cost Efficiences

- -

For some companies, there may be a cost savings involved in spreading traffic across multiple vendors. Some can take advantage of free or reduced cost tiers at lower volumes. Vendors may provide reduced costs for certain times of day that are lower utilized on their infrastructure. Applications can have varying compute requirements amongst layers of the application: some may require faster, immediate processing while others may benefit from delayed processing at a lower cost.

- -

Negotiation Strength

- -

One of the most important reasons to consider deploying in multiple cloud providers is to minimize your reliance on a single vendor’s technology for your critical business processes. As you become more vertically integrated with any vendor, your negotiation posture for pricing or favorable contract terms becomes diminished. Having production ready code available on multiple providers allows you to have less technical debt should you need to change. If you go a step further and are already sending traffic to multiple providers, you have minimized the technical debt required to switch and can negotiate from a position of strength.

- -

Business Continuity or High Availability

- -

While the major cloud providers are generally reliable, there have been a few notable outages in recent years. The most significant in recent memory being Amazon’s US-EAST S3 outage in February. Some organizations may have a policy specifying multiple providers for high availability while others should consider it where necessary and feasible as a best practice. A multi-cloud strategy can lower operational risk from a single vendor’s mistakes causing a significant outage for a mission critical application.

- -

Experimentation

- -

One of the exciting things about having competition in the space is the level of innovation and feature velocity of each provider. Every year there are major announcements of new products or features that may have a significant impact on improving your organization's competitive advantage. Having test and production environments in multiple providers gives your engineers the ability to understand and experiment with a new capability in the context of your technology stack and data. You may even try these features for a portion of your traffic and get real world data on any benefits realized.

- -

Cloudflare’s Role

- -

Cloudflare is an independent third party in your multi-cloud strategy. Our goal is to minimize the layers of lock-in between you and a provider and lower the effort of change. In particular, one area where we can help right away is to minimize the operational changes necessary at the network, similar to what Kubernetes can do at the storage and compute level. As a benefit of our network, you can also have a centralized point for security and operational control.

- -

Cloudflare Multi Cloud

- -

Cloudflare’s Load Balancing can easily be configured to act as your global application traffic aggregator and distribute your traffic amongst origins at as many clouds as you choose to utilize. Active layer 7 health checks continually probe your origins and can automatically move traffic in the case of network or application failure. All consolidated web traffic can be inspected and acted upon by Cloudflare’s best of breed Security services, providing a single control point and visibility across all application traffic, regardless of which cloud the origin may be on. You also have the benefit of Cloudflare’s Global Anycast Network, providing for better speed and higher availability regardless of which clouds your origins are hosted on.

- -

Billforward: Using Cloudflare to Implement Multi-Cloud

- -

Billforward is a San Francisco and London based startup that is focused and mission driven on changing the way people bill and charge their customers, providing a solution to the complexities of Quote-to-Cash. Their platform is built on a number of Rest APIs that other developers call to bill and generate revenue for their own companies.

- -

Billforward is using Cloudflare for its core customer facing application to failover traffic between Google Compute Engine and Amazon Web Services. Acting as a reverse proxy, Cloudflare receives all requests for and decides which of Billforward’s two configured cloud origins to use based upon the availability of that origin in near real-time. This allows Billforward to completely manage the connections to and from two disparate cloud providers using Cloudflare’s UI or API. Billforward is in the process of migrating all of their customer facing domains to a similar setup.

- -

Configuration

- -

Billforward has a single load balanced hostname with two available Pools. They’ve named the two Pools with “gce” and “aws” labels and each Pool has one Origin associated with it. All of the Pools are enabled and the entire LB/hostname is proxied through Cloudflare (as indicated by the orange cloud).

- -

Billforward Configuration UI

- -

Cloudflare probes Billforward’s Origins once every minute from all of Cloudflare’s data centers around the world (a feature available to all Load Balancing Enterprise customers). If Billforward’s GCE Origin goes down, Cloudflare will quickly and automatically failover to the AWS Origin with no actions required from Billforward’s team.

- -

Google Compute Engine was chosen as the primary provider for this application by virtue of cost. Martin Lee, Site Reliability Engineer at Billforward says, “Essentially, GCE is cheaper for our general purpose computing needs but we're more experienced with deployments in AWS. This strategy allows us to switch back and forth at will and avoid being tied in to either platform.” It is likely that Billforward will change the priority as pricing models evolve.
-

- -
-

“It's a fairly fast moving world and features released by cloud providers can have a meaningful impact on performance and cost on a week by week basis - it helps to stay flexible,” says Martin. “We may also change priority based on features.”

-
- -


For orchestration of the compute and storage layers, Billforward uses Docker containers managed through Rancher. They use distinct environments between cloud providers but are considering bridging an environment across cloud providers and using VPNs between them, which will enable them to move load between providers even more easily. “Our system is loosely coupled through a message queue,” adds Martin. “Having a container system across clouds means we can really take advantage of this - we can very easily move workloads across clouds without any danger of dropping tasks or ending up in an inconsistent state.”

- -

Benefits

- -

Billforward manages these connections at Cloudflare’s edge. Through this interface (or via the Cloudflare APIs), they can also manually move traffic from GCE to AWS by just disabling the GCE pool or by rearranging the Pool priority and make AWS the primary. These changes are near instant on the Cloudflare network and require no downtime to Billforward’s customer facing application. This allows them to act on potential advantageous pricing changes between the two cloud providers or move traffic to hit pricing tiers.

- -

In addition, Billforward is now not “locked-in” to either provider’s network; being able to move traffic and without any downtime means they can make traffic changes independent of Amazon or Google. They can also integrate additional cloud providers any time they deem fit: adding Microsoft Azure, for example, as a third Origin would be as simple as creating a new Pool and adding it to the Load Balancer.

- -

Billforward is a good example of a forward thinking company that is taking advantage of technologies from multiple providers to best serve their business and customers, while not being reliant on a single vendor. For further detail on their setup using Cloudflare, please check their blog.

-
- - - - - - -
- - - comments powered by Disqus - -
- - - - - - - -
- - -
- - - - - - - - - - - - diff --git a/h2mux/sample/index2.html b/h2mux/sample/index2.html deleted file mode 100644 index fe59d28e..00000000 --- a/h2mux/sample/index2.html +++ /dev/null @@ -1,502 +0,0 @@ - - - - - - - SCOTUS Wanders into Patent Troll Fight - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
- - -
-
- -
- - -
-

The Supreme Court Wanders into the Patent Troll Fight

-
- - by Edo Royker. -
- -
- -
-

Next Monday, the US Supreme Court will hear oral arguments in Oil States Energy Services, LLC vs. Greene’s Energy Group, LLC, which is a case to determine whether the Inter Partes Review (IPR) administrative process at the US Patent and Trademark Office (USPTO) used to determine the validity of patents is constitutional.

- -

The constitutionality of the IPR process is one of the biggest legal issues facing innovative technology companies, as the availability of this process has greatly reduced the anticipated costs, and thereby lessened the threat, of patent troll litigation. As we discuss in this blog post, it is ironic that the outcome of a case that is of such great importance to the technology community today may hinge on what courts in Britain were and were not doing more than 200 years ago.

- -

Thomas Rowlandson [Public domain], via Wikimedia Commons

- -

As we have discussed in prior blog posts, the stakes are high: if the Supreme Court finds IPR unconstitutional, then the entire system of administrative review by the USPTO — including IPR and ex parte processes — will be shuttered. This would be a mistake, as administrative recourse at the USPTO is one of the few ways to avoid the considerable costs and delays of federal court litigation, which can take years and run into the millions of dollars. Those heavy costs are often leveraged by patent trolls when they threaten litigation in the effort to procure easy and lucrative settlements from their targets.

- -

Cloudflare is Pursuing Our Fight Against Patent Trolls All the Way to the Steps of the Supreme Court

- -

Cloudflare joined Dell, Facebook, and a number of other companies, all practicing entities with large patent portfolios, in a brief amici curiae (or ‘friend of the court’ brief) in support of the IPR process, because it has a substantial positive impact on technological innovation in the United States. Amicus briefs allow parties who are interested in the outcome of a case, but are not parties to the immediate dispute before the court, to have input into the court’s deliberations.

- -

As many of you are aware, we were sued by Blackbird Technologies, a notorious patent troll, earlier this year for patent infringement, and initiated Project Jengo to crowd source prior art searches and invalidate Blackbird’s patents. One of our strategies for quickly and efficiently invalidating Blackbird’s patents is to take advantage of the IPR process at the USPTO, which can be completed in about half the time and at one tenth of the cost of a federal court case, and to initiate ex parte proceedings against Blackbird’s other patents that are overly broad and invalid.

- -

A full copy of the Amicus Brief we joined in the Oil States case is available here, and a summary of the argument follows.

- -

Oil States Makes its Case

- -

Oil States is an oilfield services and drilling equipment manufacturing company. The USPTO invalidated one of its patents related to oil drilling technology in an IPR proceeding while Oil States had a lawsuit pending against one of its competitors claiming infringement of its patent. After it lost the IPR, Oil States lost an appeal in a lower federal court based on the findings of the IPR proceeding. The Supreme Court agreed to hear the case to determine whether once the USPTO issues a patent, an inventor has a constitutionally protected property right that — under Article III of the U.S. Constitution (which outlines the powers of the judicial branch of the government), and the 7th Amendment (which addresses the right to a jury trial in certain types of cases) — cannot be revoked without intervention by the court system.

- -

Image by Paul Lowry

- -

As the patent owner, Oil States argues that the IPR process violates the relevant provisions of the constitution by allowing an administrative body, the Patent Trial and Appeal Board (PTAB)--a non-judicial forum, to decide a matter which was historically handled by the judiciary. This argument rests upon the premise that there was a historical analogue to cancellation of patent claims available in the judiciary. Since cancellation of patent claims was historically available in the judiciary, the cancellation of patent claims today must be consistent with that history and done exclusively by courts.

- -

This argument is flawed on multiple counts, which are set forth in the “friend of the court” brief we joined.

- -

First Flaw: An Administrative Process Even an Originalist Can Love

- -

As the amicus brief we joined points out, patent revocation did not historically rest within the exclusive province of the common law and chancery courts, the historical equivalents in Britain to the judiciary in the United States. Rather, prior to the Founding of the United States, patent revocation rested entirely with the Crown of England’s Privy Council, a non-judicial body comprising of advisors to the king or queen of England. It wasn’t until later that the Privy Council granted the chancery court (the judiciary branch) concurrent authority to revoke patents. Because a non-judicial body had the authority to revoke patents when the US Constitution was framed, the general principles of separation of powers and the right to trial in the Constitution do not require that patentability challenges be decided solely by courts.

- -

Second Flaw: The Judicial Role was Limited

- -

Not only did British courts share the power to address patent rights historically, the part shared by the the courts was significantly limited. Historically, the common-law and chancery courts only received a partial delegation of the Privy Council’s authority to invalidate patents. Courts only had the authority to invalidate patents for issues related to things like inequitable conduct (e.g., making false statements in the original patent application). The limited authority delegated to the England Courts did not include the authority to seek claim cancellation based on elements intrinsic to the patent or patent application, like lack of novelty or obviousness as done under an IPR proceeding. Rather, such authority remained with the Privy Council, a non-court authority, which decided questions like whether the invention was really new. Thus, like the PTAB, the Privy Council was a non-judicial body charged with responsibility to assess patent validity based on criteria that included the novelty of the invention.

- -

We think these arguments are compelling and provide very strong reasons why the Supreme Court should resist the request that such matters be resolved exclusively in federal courts. We hope that’s the position they do take because the real world implications are significant.

- -

Don’t Mess with a Good Thing

- -

The IPR process is not only consistent with the US Constitution, but it also advances the Patent Clause’s objective of promoting the progress of science and useful arts. That is, the “quid pro quo of the patent system; the public must receive meaningful disclosure in exchange for being excluded from practicing the invention for a limited period of time” by patent rights. (Enzo Biochem, Inc. v. Gen-probe Inc.) Congress created the IPR process in the America Invents Act in 2011 to use administrative review to weed out poor-quality patents that did not satisfy this quid pro quo because they had not actually disclosed very much. Congress sought to provide quick and cost effective administrative procedures for challenging the validity of patent claims that did not disclose novel inventions, or that claimed to disclose substantially more innovation than they actually did, to improve patent quality and restore confidence in the presumption of validity. In other words, Congress created a system to specifically permit the efficient challenge of the zealous assertion of vague and overly broad patents.

- -

As a recent study by the Congressional Research Service found, non-practicing entity (i.e., patent troll) patent litigation “activity cost defendants and licensees $29 billion in 2011, a 400 percent increase over $7 billion in 2005” and “the losses are mostly deadweight, with less than 25 percent flowing to innovation and at least that much going towards legal fees.” (see Brian T. Yeh, Cong. Research Serv., R42668) The IPR process enables innovative companies to navigate patent troll activity in an efficient manner and devote a greater proportion of their resources to research and development, rather than litigation or cost-of-litigation settlement fees for invalid patents.

- -

By EFF-Graphics (Own work), via Wikimedia Commons

- -

Additionally, the IPR process reduces the total number and associated costs of patent disputes in a number of ways.

- -
    -
  • Patent owners, especially patent trolls, are less likely to threaten litigation or file an infringement suit based on patent claims that they know or suspect to be invalid. In fact, patent owners who threaten or file suit merely to seek cost-of-litigation settlements have become far less prevalent because of the availability of the IPR process to reduce the cost of litigation.

  • -
  • Patent owners are less likely to initiate litigation out of concerns that the IPR proceedings may culminate in PTAB’s cancellation of all patent claims asserted in the infringement suit.

  • -
  • Where the PTAB does not cancel all asserted claims, statutory estoppel and the PTAB’s claim construction may serve to narrow the infringement issues to be resolved by the district court.

  • -
- -

Our hope is that the US Supreme Court justices take into full consideration the larger community of innovative companies that are helped by the IPR system in battling patent trolls, and do not limit their consideration to the implications on the parties to Oil States (neither of which is a non-practicing entity). As we have explained, not only does the IPR process enable innovative companies to focus their resources on technological innovation, instead of legal fees, but allowing the USPTO to administer IPR and ex parte proceedings is entirely consistent with the US Constitution.

- -

While we await a decision in Oil States, expect to see Cloudflare initiate IPR and ex parte proceedings against Blackbird Technologies patents in the coming months.

- -

We will make sure to keep you updated.

-
- - - - - - -
- - - comments powered by Disqus - -
- - - - - - - -
- - -
- - - - - - - - - - - - diff --git a/h2mux/sample/jquery.fitvids.js b/h2mux/sample/jquery.fitvids.js deleted file mode 100644 index a8551f6e..00000000 --- a/h2mux/sample/jquery.fitvids.js +++ /dev/null @@ -1,74 +0,0 @@ -/*global jQuery */ -/*jshint multistr:true browser:true */ -/*! -* FitVids 1.0.3 -* -* Copyright 2013, Chris Coyier - http://css-tricks.com + Dave Rupert - http://daverupert.com -* Credit to Thierry Koblentz - http://www.alistapart.com/articles/creating-intrinsic-ratios-for-video/ -* Released under the WTFPL license - http://sam.zoy.org/wtfpl/ -* -* Date: Thu Sept 01 18:00:00 2011 -0500 -*/ - -(function( $ ){ - - "use strict"; - - $.fn.fitVids = function( options ) { - var settings = { - customSelector: null - }; - - if(!document.getElementById('fit-vids-style')) { - - var div = document.createElement('div'), - ref = document.getElementsByTagName('base')[0] || document.getElementsByTagName('script')[0], - cssStyles = '­'; - - div.className = 'fit-vids-style'; - div.id = 'fit-vids-style'; - div.style.display = 'none'; - div.innerHTML = cssStyles; - - ref.parentNode.insertBefore(div,ref); - - } - - if ( options ) { - $.extend( settings, options ); - } - - return this.each(function(){ - var selectors = [ - "iframe[src*='player.vimeo.com']", - "iframe[src*='youtube.com']", - "iframe[src*='youtube-nocookie.com']", - "iframe[src*='kickstarter.com'][src*='video.html']", - "object", - "embed" - ]; - - if (settings.customSelector) { - selectors.push(settings.customSelector); - } - - var $allVideos = $(this).find(selectors.join(',')); - $allVideos = $allVideos.not("object object"); // SwfObj conflict patch - - $allVideos.each(function(){ - var $this = $(this); - if (this.tagName.toLowerCase() === 'embed' && $this.parent('object').length || $this.parent('.fluid-width-video-wrapper').length) { return; } - var height = ( this.tagName.toLowerCase() === 'object' || ($this.attr('height') && !isNaN(parseInt($this.attr('height'), 10))) ) ? 
parseInt($this.attr('height'), 10) : $this.height(), - width = !isNaN(parseInt($this.attr('width'), 10)) ? parseInt($this.attr('width'), 10) : $this.width(), - aspectRatio = height / width; - if(!$this.attr('id')){ - var videoID = 'fitvid' + Math.floor(Math.random()*999999); - $this.attr('id', videoID); - } - $this.wrap('
').parent('.fluid-width-video-wrapper').css('padding-top', (aspectRatio * 100)+"%"); - $this.removeAttr('height').removeAttr('width'); - }); - }); - }; -// Works with either jQuery or Zepto -})( window.jQuery || window.Zepto ); diff --git a/h2mux/sample/screen.css b/h2mux/sample/screen.css deleted file mode 100644 index 8583251a..00000000 --- a/h2mux/sample/screen.css +++ /dev/null @@ -1,70 +0,0 @@ -html,body,div,span,applet,object,iframe,h1,h2,h3,h4,h5,h6,p,blockquote,pre,a,abbr,acronym,address,big,cite,code,del,dfn,em,img,ins,kbd,q,s,samp,small,strike,strong,sub,sup,tt,var,b,u,i,center,dl,dt,dd,ol,ul,li,fieldset,form,label,legend,table,caption,tbody,tfoot,thead,tr,th,td,article,aside,canvas,details,embed,figure,figcaption,footer,header,menu,nav,output,ruby,section,summary,time,mark,audio,video{margin:0;padding:0;border:0;font:inherit;font-size:100%;vertical-align:baseline}html{line-height:1}ol,ul{list-style:none}table{border-collapse:collapse;border-spacing:0}caption,th,td{text-align:left;font-weight:normal;vertical-align:middle}q,blockquote{quotes:none}q:before,q:after,blockquote:before,blockquote:after{content:"";content:none}a img{border:none}article,aside,details,figcaption,figure,footer,header,menu,nav,section,summary{display:block}.clearfix,.dl-horizontal,.row,.columns,.wrapper,.control-group,.input-picker .ws-picker-body,.input-picker .ws-button-row,.input-picker .picker-grid,.input-picker .picker-list,.footer-nav,.modal-header,.modal-content,.modal-footer,.modal-body-section,.table-meta,.mod-row,.mod-toolbar{*zoom:1}.clearfix:before,.dl-horizontal:before,.row:before,.columns:before,.wrapper:before,.control-group:before,.input-picker .ws-picker-body:before,.input-picker .ws-button-row:before,.input-picker .picker-grid:before,.input-picker 
.picker-list:before,.footer-nav:before,.modal-header:before,.modal-content:before,.modal-footer:before,.modal-body-section:before,.table-meta:before,.mod-row:before,.mod-toolbar:before,.clearfix:after,.dl-horizontal:after,.row:after,.columns:after,.wrapper:after,.control-group:after,.input-picker .ws-picker-body:after,.input-picker .ws-button-row:after,.input-picker .picker-grid:after,.input-picker .picker-list:after,.footer-nav:after,.modal-header:after,.modal-content:after,.modal-footer:after,.modal-body-section:after,.table-meta:after,.mod-row:after,.mod-toolbar:after{content:'';display:table}.clearfix:after,.dl-horizontal:after,.row:after,.columns:after,.wrapper:after,.control-group:after,.input-picker .ws-picker-body:after,.input-picker .ws-button-row:after,.input-picker .picker-grid:after,.input-picker .picker-list:after,.footer-nav:after,.modal-header:after,.modal-content:after,.modal-footer:after,.modal-body-section:after,.table-meta:after,.mod-row:after,.mod-toolbar:after{clear:both}.border-box,.columns,.columns>.column,.btn,button,input[type="button"],input[type="submit"],input,select,textarea,.switch,.file:before,.proxy .cloud,.control-group,.input-prepend .btn,.input-prepend .add-on,.input-append .btn,.input-append .add-on,.flexbox .control-group,.flexbox .control-label,.flexbox .controls,.ws-input input,.ws-input .ws-input-seperator,pre,.mod-row,.mod-cell,.mod-setting-control,.mod-control-group .ui-block{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}@-webkit-keyframes menuTransition{0%{display:none;opacity:0}1%{display:block;opacity:0;top:80%}100%{display:none;opacity:1;top:102%}}@-moz-keyframes menuTransition{0%{display:none;opacity:0}1%{display:block;opacity:0;top:80%}100%{display:none;opacity:1;top:102%}}@-ms-keyframes menuTransition{0%{display:none;opacity:0}1%{display:block;opacity:0;top:80%}100%{display:none;opacity:1;top:102%}}@keyframes 
menuTransition{0%{display:none;opacity:0}1%{display:block;opacity:0;top:80%}100%{display:none;opacity:1;top:102%}}@-webkit-keyframes bgFadeOut{100%{background-color:transparent}}@-moz-keyframes bgFadeOut{100%{background-color:transparent}}@-ms-keyframes bgFadeOut{100%{background-color:transparent}}@keyframes bgFadeOut{100%{background-color:transparent}}@font-face{font-family:'Open Sans';font-style:normal;font-weight:300;format("embedded-opentype"),url('../fonts/opensans-300.woff') format("woff"),url('../fonts/opensans-300.ttf') format("truetype"),url('../fonts/opensans-300.svg#open_sanssemibold') format("svg")}@font-face{font-family:'Open Sans';font-style:normal;font-weight:400;src:url('../fonts/opensans-400.eot');src:local("Open Sans"),local("OpenSans"),url('../fonts/opensans-400.eot?#iefix') format("embedded-opentype"),url('../fonts/opensans-400.woff') format("woff"),url('../fonts/opensans-400.ttf') format("truetype"),url('../fonts/opensans-400.svg#open_sansregular') format("svg")}@font-face{font-family:'Open Sans';font-style:normal;font-weight:600;src:local("Open Sans Semibold"),local("OpenSans-Semibold"),format("embedded-opentype"),url('../fonts/opensans-600.woff') format("woff"),url('../fonts/opensans-600.ttf') format("truetype"),url('../fonts/opensans-600.svg#open_sanssemibold') format("svg")}@font-face{font-family:'Open Sans';font-style:normal;font-weight:700;src:url('../fonts/opensans-700.eot');src:local("Open Sans Bold"),local("OpenSans-Bold"),url('../fonts/opensans-700.eot?#iefix') format("embedded-opentype"),url('../fonts/opensans-700.woff') format("woff"),url('../fonts/opensans-700.ttf') format("truetype"),url('../fonts/opensans-700.svg#open_sansbold') format("svg")}@font-face{font-family:'Open Sans';font-style:italic; - font-weight:300; - src:local("Open Sans Light Italic"),local("OpenSansLight-Italic")format("embedded-opentype"),url('../fonts/opensans-300i.woff') format("woff"),url('../fonts/opensans-300i.ttf') 
format("truetype"),url('../fonts/opensans-300i.svg#open_sanslight_italic') format("svg")}@font-face{font-family:'Open Sans';font-style:italic;font-weight:400;src:url('../fonts/opensans-400i.eot');src:local("Open Sans Italic"),local("OpenSans-Italic"),url('../fonts/opensans-400i.eot?#iefix') format("embedded-opentype"),url('../fonts/opensans-400i.woff') format("woff"),url('../fonts/opensans-400i.ttf') format("truetype"),url('../fonts/opensans-400i.svg#open_sansitalic') format("svg")}.select2-container{position:relative;vertical-align:top;display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline}.select2-container .select2-choice{color:#333}.select2-container.select2-drop-above .select2-choice{background-color:#fff;border-bottom-color:#b1b1b1}.select2-choice{background-color:#fff;border:1px solid #b1b1b1;display:block;font-size:0.93333rem;font-weight:400;line-height:1.2;padding:0.53333rem 0 0.53333rem 0.8rem;position:relative;text-decoration:none;white-space:nowrap;-webkit-user-select:none;-moz-user-select:none;user-select:none;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}.select2-choice:hover{border-color:#989898}.select2-choice .select2-chosen{margin-right:3rem;min-height:1em;display:block;overflow:hidden;white-space:nowrap;-o-text-overflow:ellipsis;-ms-text-overflow:ellipsis;text-overflow:ellipsis}.select2-choice .select2-arrow{background:transparent;border-left:1px solid #b1b1b1;display:block;height:100%;position:absolute;right:0;top:0;width:2rem}.select2-choice .select2-arrow b{display:block;width:100%;height:100%;position:relative}.select2-choice .select2-arrow b:before,.select2-choice .select2-arrow b:after{border:4px solid transparent;border-bottom-color:#bebebe;content:'';display:block;height:0;left:50%;margin-left:-5px;margin-top:-9px;position:absolute;top:50%;width:0}.select2-choice .select2-arrow 
b:before{border-bottom-color:transparent;border-top-color:#bebebe;margin-top:3px}.select2-choice abbr{display:none}.select2-allowclear .select2-choice .select2-chosen{margin-right:3.5rem}.select2-allowclear .select2-choice abbr{display:block}.select2-container,.select2-drop,.select2-search,.select2-search input{-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.select2-drop{background:#fff;border:1px solid #b1b1b1;border-top:0;color:#333;font-size:0.86667rem;margin-top:-2px;position:absolute;top:100%;width:100%;z-index:1010;-webkit-box-shadow:0 4px 5px rgba(0,0,0,0.15);-moz-box-shadow:0 4px 5px rgba(0,0,0,0.15);box-shadow:0 4px 5px rgba(0,0,0,0.15)}.select2-drop.select2-drop-above{margin-top:1px;border-top:1px solid #b1b1b1;border-bottom:0;-webkit-box-shadow:0 -4px 5px rgba(0,0,0,0.15);-moz-box-shadow:0 -4px 5px rgba(0,0,0,0.15);box-shadow:0 -4px 5px rgba(0,0,0,0.15)}.select2-drop.select2-drop-above .select2-search input{margin-top:4px}.select2-search{min-height:1.73333rem;margin:0;padding-left:0.26667rem;padding-right:0.26667rem;white-space:nowrap;width:100%;z-index:1020;display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline}.select2-search input{background:#fff url('../images/cloudflare-sprite.png') no-repeat -35px -26px;border:1px solid #b1b1b1;font-size:1em;height:auto;outline:0;margin:0;min-height:1.73333rem;padding:0.26667rem 0.33333rem 0.26667rem 1.73333rem;width:100%;-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none;-webkit-transition:none;-moz-transition:none;-o-transition:none;transition:none}.select2-search input.select2-active{background:#fff url('../images/spinner.gif') no-repeat 0 0}.select2-search-hidden{display:block;left:-9999em;position:absolute}.select2-container-active .select2-choice,.select2-container-active .select2-choices{border:1px solid #2f7bbf;outline:none;-webkit-box-shadow:0 0 5px rgba(0,0,0,0.3);-moz-box-shadow:0 0 5px 
rgba(0,0,0,0.3);box-shadow:0 0 5px rgba(0,0,0,0.3)}.select2-dropdown-open .select2-choice{border:1px solid #b1b1b1;border-bottom-color:transparent;-webkit-box-shadow:inset 0 1px 0 #fff;-moz-box-shadow:inset 0 1px 0 #fff;box-shadow:inset 0 1px 0 #fff}.select2-dropdown-open .select2-choice .select2-arrow{background:transparent;border-left:none}.select2-results{margin:0.26667rem 0.26667rem 0.26667rem 0;max-height:20em;overflow-x:hidden;overflow-y:auto;padding:0 0 0 0.26667rem;position:relative}.select2-results .select2-result-sub{margin:0 0 0 0}.select2-results .select2-result-sub>li .select2-result-label{padding-left:1.33333rem}.select2-results .select2-result-sub .select2-result-sub>li .select2-result-label{padding-left:2.66667rem}.select2-results .select2-result-sub .select2-result-sub .select2-result-sub>li .select2-result-label{padding-left:4rem}.select2-results .select2-result-sub .select2-result-sub .select2-result-sub .select2-result-sub>li .select2-result-label{padding-left:5.33333rem}.select2-results .select2-result-sub .select2-result-sub .select2-result-sub .select2-result-sub .select2-result-sub>li .select2-result-label{padding-left:6.66667rem}.select2-results .select2-result-sub .select2-result-sub .select2-result-sub .select2-result-sub .select2-result-sub .select2-result-sub>li .select2-result-label{padding-left:7.33333rem}.select2-results .select2-result-sub .select2-result-sub .select2-result-sub .select2-result-sub .select2-result-sub .select2-result-sub .select2-result-sub>li .select2-result-label{padding-left:8rem}.select2-results li{list-style:none;display:list-item}.select2-results li.select2-result-with-children>.select2-result-label{font-weight:600}.select2-results .select2-no-results,.select2-results .select2-result-label{cursor:pointer;margin:0;padding:0.2rem 0.46667rem 0.26667rem}.select2-results .select2-highlighted{background:#2f7bbf;color:#fff}.select2-results .select2-highlighted em{background:transparent}.select2-results 
.select2-no-results,.select2-results .select2-searching,.select2-results .select2-selection-limit{display:list-item}.select2-results .select2-disabled{display:none}.select2-more-results.select2-active{background:#f2f2f2 url('../images/spinner.gif') no-repeat 100%}.select2-more-results{background:#f2f2f2;display:list-item}.select2-container.select2-container-disabled .select2-choice{background-color:#f2f2f2;background-image:none;border:1px solid #bebebe;cursor:default}.select2-container.select2-container-disabled .select2-choice div{background-color:#f2f2f2;background-image:none;border-left:0}.select2-container-multi{min-width:10em}.select2-container-multi .select2-choices{background-color:#fff;border:1px solid #b1b1b1;cursor:text;height:auto;height:1%;margin:0;min-height:1.86667rem;overflow:hidden;padding:0.13333em;position:relative;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}.select2-container-multi .select2-choices li{float:left;list-style:none}.select2-container-multi .select2-choices .select2-search-field{margin:0;padding:0;white-space:nowrap}.select2-container-multi .select2-choices .select2-search-field input{background:transparent;border:0;color:#333;font-size:1rem;height:1.5rem;margin:1px 0;outline:0;padding:0 0.13333em}.select2-container-multi .select2-choices .select2-search-field input:focus{-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none}.select2-container-multi .select2-choices .select2-search-field input.select2-active{background:#fff url('../images/spinner.gif') no-repeat 100%}.select2-container-multi .select2-choices .select2-search-choice{background-color:#fff;border:1px solid #b1b1b1;color:#333;cursor:default;line-height:0.86667rem;margin:0.2rem 0 0.13333rem 0.33333rem;padding:0.2rem 0.33333rem 0.2rem 1.2rem;position:relative}.select2-container-multi.select2-container-active .select2-choices{border:1px solid #2f7bbf;outline:none;-webkit-box-shadow:0 0 5px 
rgba(0,0,0,0.3);-moz-box-shadow:0 0 5px rgba(0,0,0,0.3);box-shadow:0 0 5px rgba(0,0,0,0.3)}.select2-default{color:#333}.select2-container-multi .select2-choices .select2-container-multi .select2-choices .select2-search-choice .select2-search-choice-close{cursor:default}.select2-container-multi .select2-choices .select2-search-choice-focus{background:#f2f2f2}.select2-search-choice-close{background:url('../images/cloudflare-sprite.png') no-repeat top right;border:0;cursor:pointer;display:block;font-size:1px;height:0.8rem;outline:0;position:absolute;right:2.46667rem;text-decoration:none;top:0.6rem;width:0.8rem}.select2-search-choice-close:hover{background-position:right -11px;cursor:pointer}.select2-container-multi .select2-search-choice-close{left:0.53333rem}.select2-container-multi .select2-choices .select2-search-choice .select2-search-choice-close:hover{background-position:right -11px}.select2-container-multi .select2-choices .select2-search-choice-focus .select2-search-choice-close{background-position:right -11px}.select2-container-multi.select2-container-disabled .select2-choices{background-color:#fafafa;background-image:none;border:1px solid #cbcbcb;cursor:default}.select2-container-multi.select2-container-disabled .select2-choices .select2-search-choice{background-image:none;background-color:#fafafa;border:1px solid #cbcbcb;padding:0.2rem 0.33333rem 0.2rem 0.33333rem}.select2-container-multi.select2-container-disabled .select2-choices .select2-search-choice .select2-search-choice-close,.select2-display-none{display:none}.select2-result-selectable .select2-match,.select2-result-unselectable .select2-result-selectable .select2-match{text-decoration:underline}.select2-result-unselectable .select2-match{text-decoration:none}.select2-offscreen{position:absolute;left:-9999px}.select2-drop-mask{bottom:0;left:0;max-height:100%;max-width:100%;position:fixed;right:0;top:0;z-index:1009}@media only screen and (-webkit-min-device-pixel-ratio: 1.5){.select2-search 
input,.select2-search-choice-close,.select2-container .select2-choice abbr{background-image:url('../images/select2x2-cf.png') !important;background-repeat:no-repeat !important;background-size:60px 45px !important}.select2-search input{background-position:100% -24px !important}}.flashblocker-assumed{min-height:20px;min-width:20px;z-index:2147483647}.cue-display{position:absolute !important;margin:0;padding:0px !important;max-width:100% !important;max-height:100% !important;border:none !important;background:none !important;text-align:center;visibility:hidden;font-family:sans-serif;font-size:12px;white-space:pre-wrap;overflow:hidden}.cue-display b{font-weight:bold}.cue-display i{font-style:italic}.cue-display u{text-decoration:underline}.cue-display span.cue-wrapper{position:absolute;left:0;bottom:0;right:0;display:block;padding:0;margin:0;width:100%;font-size:160%;color:#fff;visibility:visible !important}.cue-display .cue-line{display:block}.cue-display span.cue{display:inline-block;padding:3px 5px;background:#000;background:rgba(0,0,0,0.8);color:#fff}.cue-display .description-cues{position:absolute;top:-99px;left:-99px;display:block;width:5px;height:5px;overflow:hidden}mark{background-color:#ff9;color:#000;font-style:italic;font-weight:bold}.ws-important-hide{display:none !important;visibility:hidden !important;position:absolute;top:-999999px}.webshims-visual-hide{position:absolute !important;top:0 !important;left:0 !important;visibility:hidden !important;width:0 !important;height:0 !important;overflow:hidden !important}.webshims-visual-hide *{visibility:hidden !important}#swflocalstorageshim-wrapper{position:absolute;top:-999px;left:-9999px;overflow:hidden;width:215px;min-width:215px !important;z-index:2147483647}#swflocalstorageshim-wrapper .polyfill-exceeded-message{margin:0 0 5px;padding:5px;background:#ffd;color:#000;font-size:13px}#swflocalstorageshim-wrapper object{min-height:138px !important;min-width:215px 
!important}details{overflow:hidden}summary{position:relative}.closed-details-child{display:none !important}.closed-details-summary .details-open-indicator{background-position:0 -20px}.polyfill-important .details-open-indicator{margin:-1px 0 0 !important;padding:0 !important;border:0 !important;display:inline-block !important;width:16px !important;height:11px !important; vertical-align:middle !important}.polyfill-important .closed-details-summary .details-open-indicator{background-position:0 -20px !important}summary.summary-has-focus{outline:1px dotted #aaa;outline-offset:-1px}::selection,::-moz-selection{background:rgba(246,139,31,0.2);color:#333}img::selection,img::-moz-selection{background:rgba(246,139,31,0.3)}html,body{color:#333;font-family:"Open Sans",Helvetica,Arial,sans-serif;font-size:15px;line-height:1.5;-webkit-tap-highlight-color:rgba(246,139,31,0.3);-webkit-font-smoothing:antialiased}b,strong{font-weight:700}i,em{font-style:italic}small{font-size:80%}sup,.sup,sub,.sub{font-size:60%;position:relative;vertical-align:top}sup,.sup{top:0.25em}sub,.sub{bottom:0.25em;vertical-align:bottom}p .ui-item{margin:0 0.4rem;vertical-align:baseline}ul{list-style-type:disc}ul.circle{list-style-type:circle}ul.disc{list-style-type:disc}ul.square{list-style-type:square}ol{list-style-type:decimal}ol.roman{list-style-type:lower-roman}ol.roman-upper{list-style-type:upper-roman}ol.alpha{list-style-type:lower-alpha}ol,ul{list-style-position:outside;margin-left:3em}ol.unstyled,ul.unstyled,.exceptions-list{list-style-type:none;margin-left:0}ol.inline,ol.inline li,ul.inline,ul.inline li{display:inline;margin:0}a{color:#2f7bbf;outline:none;text-decoration:none;-webkit-transition:all 0.15s ease;-moz-transition:all 0.15s ease;-o-transition:all 0.15s ease;transition:all 0.15s ease}a:hover{color:#f68b1f}a:focus{color:#62a1d8;outline:none}a:active{color:#c16508;outline:none}h1,h2,h3,h4,h5,h6{font-weight:600}h1 small,h2 small,h3 small,h4 
small{color:#7e7e7e;font-size:50%}h1,.h1{font-size:2.4rem;line-height:1.2}h2,.h2{font-size:2rem;line-height:1.3}h3,.h3{font-size:1.66667rem;line-height:1.3}h4,.h4,.lead{font-size:1.2rem;line-height:1.3}h5,.h5{font-size:1rem}h6,.h6{font-size:0.93333rem}header .subheadline{margin-top:0}.section-head,legend{border-bottom:1px solid #e0e0e0;margin-bottom:0.5rem;padding-bottom:0.5rem}dl{margin-bottom:1.5em}dt{font-weight:600}dd{margin-left:1.5em}.dl-horizontal{width:100%}.dl-horizontal dt{clear:left;float:left;text-align:right;width:30%}.dl-horizontal dd{margin-left:30%;padding-left:1em}.small{font-size:0.8em;line-height:1.3}.normal{font-size:1rem;font-weight:400}.screen-reader-text,.assistive-text{height:0;overflow:hidden;position:absolute;text-indent:200%;white-space:nowrap;width:0}blockquote{color:#7e7e7e;font-size:1.13333rem}blockquote cite{display:block;font-style:italic;margin-top:1em}blockquote cite:before{content:'\2014';padding-right:0.35em}.subheadline{color:#7e7e7e;font-weight:300}.lead{font-weight:400}.text-info{color:#2f7bbf}.text-important,.text-error{color:#bd2426}.text-success{color:#9bca3e}.text-warning{color:#f68b1f}.text-nonessential{color:#7e7e7e}.well{background-color:#f5f5f5;padding:1.5em;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}.well.compact{padding:1em}p+p,p+ul,p+ol,p+dl,p+table,ul+p,ul+h2,ul+h3,ul+h4,ul+h5,ul+h6,ol+p,ol+h2,ol+h3,ol+h4,ol+h5,ol+h6{margin-top:1.5em}h1+p,p+h1,p+h2,p+h3,p+h4,p+h5,p+h6{margin-top:1.25em}h1+h2,h1+h3,h2+h3,h3+h4,h4+h5{margin-top:0.25em}h2+p{margin-top:1em}h1+h4,h1+h5,h1+h6,h2+h4,h2+h5,h2+h6,h3+h5,h3+h6,h3+p,h4+p,h5+p{margin-top:0.5em}.navigation ul{list-style:none;margin-left:0}.navigation a{color:#2f7bbf;text-decoration:none;display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline}.navigation a:hover{color:#333}.sidebar blockquote{font-size:1.13333rem}.sidebar 
.title{color:#777}#main{min-height:400px}img,object{height:auto;max-width:100%}section,.section{margin-bottom:2.5rem;margin-top:2.5rem}section.compact,.section.compact{margin-bottom:1.5rem;margin-top:1.5rem}audio{display:none;height:0;width:0;overflow:hidden}video{overflow:hidden}video,audio[controls],audio.webshims-controls{display:inline-block;min-height:3rem;min-width:2.66667rem}video>*,audio>*{visibility:hidden}.no-swf video>*,.no-swf audio>*{visibility:inherit}.row{clear:both;display:block}.col-1{margin-left:0;max-width:100%;width:100%}.col-2{margin-left:0;max-width:100%;width:100%}.col-3{margin-left:0;max-width:100%;width:100%}.col-4{margin-left:0;max-width:100%;width:100%}.col-5{margin-left:0;max-width:100%;width:100%}.col-6{margin-left:0;max-width:100%;width:100%}.col-7{margin-left:0;max-width:100%;width:100%}.col-8{margin-left:0;max-width:100%;width:100%}.col-9{margin-left:0;max-width:100%;width:100%}.col-10{margin-left:0;max-width:100%;width:100%}.col-11{margin-left:0;max-width:100%;width:100%}.col-12{margin-left:0;max-width:100%;width:100%}.col-13{margin-left:0;max-width:100%;width:100%}.col-14{margin-left:0;max-width:100%;width:100%}.col-15{margin-left:0;max-width:100%;width:100%}.col-16{margin-left:0;max-width:100%;width:100%}.columns{display:block;list-style:none;padding:0}.columns img,.columns input,.columns select,.columns object,.columns textarea{max-width:100%}.columns>.column{float:left;padding-bottom:3rem}.columns,.columns>.column{width:100%}.width-third{width:33.3334%}.width-half{width:50%}.width-full,.mod-group{width:100%}.wrapper{margin-left:auto;margin-right:auto;width:90%}.primary-content{margin-bottom:2.66667em;margin-top:1.33333em}@media screen and (max-width: 49.1em){.tablet-only,.desktop-only{display:none !important}}@media screen and (min-width: 49.2em){.wrapper{width:47.2rem}.primary-content{float:left;margin:0 0 0 16.8rem;width:30.4rem}.sidebar{float:left;margin-left:-47.2rem;width:13.6rem}.reverse-sidebar 
.primary-content{margin-left:0}.reverse-sidebar .sidebar{margin-left:3.2rem}.primary-content:only-child{float:none;margin-left:auto;margin-right:auto}.columns>.column{padding-bottom:0}.columns.two>.column,.columns.cols-2>.column,.columns.four>.column,.columns.cols-4>.column{padding-left:0;padding-right:1.5rem;width:50%}.columns.two>.column:nth-child(even),.columns.cols-2>.column:nth-child(even),.columns.four>.column:nth-child(even),.columns.cols-4>.column:nth-child(even){padding-left:1.5rem;padding-right:0}.columns.two>.column:nth-child(odd),.columns.cols-2>.column:nth-child(odd),.columns.four>.column:nth-child(odd),.columns.cols-4>.column:nth-child(odd){clear:left}.columns.two>.column:nth-child(n+3),.columns.cols-2>.column:nth-child(n+3),.columns.four>.column:nth-child(n+3),.columns.cols-4>.column:nth-child(n+3){padding-top:3rem}.columns.three>.column,.columns.cols-3>.column{padding-left:2rem;width:33.3333333333333%}.columns.three>.column:first-child,.columns.three>.column:nth-child(3n+1),.columns.cols-3>.column:first-child,.columns.cols-3>.column:nth-child(3n+1){clear:left;padding-left:0;padding-right:2rem}.columns.three>.column:nth-child(3n+2),.columns.cols-3>.column:nth-child(3n+2){padding-left:1rem;padding-right:1rem}.columns.three>.column:nth-child(n+4),.columns.cols-3>.column:nth-child(n+4){padding-top:3rem}.columns.three>.column:nth-child(-n+3),.columns.cols-3>.column:nth-child(-n+3){padding-top:0}}@media screen and (min-width: 
66em){.col-1{display:block;float:left;margin-left:48px;width:1rem}.col-2{display:block;float:left;margin-left:48px;width:5.2rem}.col-3{display:block;float:left;margin-left:48px;width:9.4rem}.col-4{display:block;float:left;margin-left:48px;width:13.6rem}.col-5{display:block;float:left;margin-left:48px;width:17.8rem}.col-6{display:block;float:left;margin-left:48px;width:22rem}.col-7{display:block;float:left;margin-left:48px;width:26.2rem}.col-8{display:block;float:left;margin-left:48px;width:30.4rem}.col-9{display:block;float:left;margin-left:48px;width:34.6rem}.col-10{display:block;float:left;margin-left:48px;width:38.8rem}.col-11{display:block;float:left;margin-left:48px;width:43rem}.col-12{display:block;float:left;margin-left:48px;width:47.2rem}.col-13{display:block;float:left;margin-left:48px;width:51.4rem}.col-14{display:block;float:left;margin-left:48px;width:55.6rem}.col-15{display:block;float:left;margin-left:48px;width:59.8rem}.col-16{display:block;float:left;margin-left:48px;width:64rem}[class*="col-"]:first-child{margin-left:0}.wrapper{width:64rem}.wrapper.wide{max-width:100%;width:72.4rem}.primary-content{float:left;margin-left:21rem;width:43rem}.sidebar{float:left;margin-left:-64rem;width:17.8rem}.wide .primary-content{width:51.4rem}.wide 
.sidebar{margin-left:-72.4rem}.columns>.column{padding-bottom:0}.columns.four>.column,.columns.cols-4>.column{padding-left:2.25rem;width:25%}.columns.four>.column:nth-child(odd),.columns.cols-4>.column:nth-child(odd){clear:none}.columns.four>.column:first-child,.columns.four>.column:nth-child(4n+1),.columns.cols-4>.column:first-child,.columns.cols-4>.column:nth-child(4n+1){clear:left;padding-left:0;padding-right:2.25rem}.columns.four>.column:nth-child(4n+2),.columns.cols-4>.column:nth-child(4n+2){padding-left:0.75rem;padding-right:1.5rem}.columns.four>.column:nth-child(4n+3),.columns.cols-4>.column:nth-child(4n+3){padding-left:1.5rem;padding-right:0.75rem}.columns.four>.column:nth-child(n+5),.columns.cols-4>.column:nth-child(n+5){padding-top:3rem}.columns.four>.column:nth-child(-n+4),.columns.cols-4>.column:nth-child(-n+4){padding-top:0}}.btn,button,input[type="button"],input[type="submit"]{background-color:transparent;border:1px solid #dedede;color:#333;font-size:0.93333rem;font-weight:400;line-height:1.2;margin:0;padding:0.6em 1.33333em 0.53333em;-webkit-user-select:none;-moz-user-select:none;user-select:none;display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px;-webkit-transition:all 0.2s ease;-moz-transition:all 0.2s ease;-o-transition:all 0.2s ease;transition:all 0.2s ease}.btn:hover,.input-picker .picker-list td button.checked-value,button:hover,input[type="button"]:hover,input[type="submit"]:hover{background-color:rgba(0,0,0,0.05);border-color:#585858;color:#333}.btn:focus,button:focus,input[type="button"]:focus,input[type="submit"]:focus{color:inherit;outline:none;-webkit-box-shadow:inset 0 0 4px rgba(0,0,0,0.3);-moz-box-shadow:inset 0 0 4px rgba(0,0,0,0.3);box-shadow:inset 0 0 4px 
rgba(0,0,0,0.3)}.btn.active,.btn:active,button.active,button:active,input[type="button"].active,input[type="button"]:active,input[type="submit"].active,input[type="submit"]:active{background-color:rgba(0,0,0,0.05);border-color:#333;color:#1a1a1a}.btn::-moz-focus-inner,button::-moz-focus-inner,input[type="button"]::-moz-focus-inner,input[type="submit"]::-moz-focus-inner{padding:0;border:0}.btn .caret,button .caret,input[type="button"] .caret,input[type="submit"] .caret{border-top-color:inherit;margin-left:0.25em;margin-top:0.18333em}.btn-large{padding:1rem 1.66667rem}.btn-cta,.btn-cta-alt{padding:1rem 3rem}.btn-std,.btn-primary,.btn-std-alt,.btn-primary-alt{background-color:#2f7bbf;border-color:transparent;color:#fff}.btn-std:hover,.btn-primary:hover,.btn-std-alt:hover,.btn-primary-alt:hover{background-color:#62a1d8;border-color:#2f7bbf;color:#fff}.btn-std.active,.btn-std:focus,.btn-std:active,.btn-primary.active,.btn-primary:focus,.btn-primary:active,.btn-std-alt.active,.btn-std-alt:focus,.btn-std-alt:active,.btn-primary-alt.active,.btn-primary-alt:focus,.btn-primary-alt:active{background-color:#62a1d8;border-color:#163959;color:#fff}.btn-std-alt,.btn-primary-alt{background-color:transparent;border-color:#2f7bbf;color:#2f7bbf}.btn-cta,.btn-success,.btn-accept,.btn-accept-alt,.btn-cancel,.btn-cancel-alt,.btn-delete,.btn-cta-alt,.btn-success-alt,.btn-accept-alt{background-color:#9bca3e;border-color:transparent;color:#fff}.btn-cta:hover,.btn-success:hover,.btn-accept:hover,.btn-accept-alt:hover,.btn-cancel:hover,.btn-cancel-alt:hover,.btn-delete:hover,.btn-cta-alt:hover,.btn-success-alt:hover,.btn-accept-alt:hover{background-color:#bada7a;border-color:#9bca3e;color:#fff}.btn-cta.active,.btn-cta:focus,.btn-cta:active,.btn-success.active,.active.btn-accept,.active.btn-accept-alt,.active.btn-cancel,.active.btn-cancel-alt,.active.btn-delete,.btn-success:focus,.btn-accept:focus,.btn-accept-alt:focus,.btn-cancel:focus,.btn-cancel-alt:focus,.btn-delete:focus,.btn-success:acti
ve,.btn-accept:active,.btn-accept-alt:active,.btn-cancel:active,.btn-cancel-alt:active,.btn-delete:active,.btn-cta-alt.active,.btn-cta-alt:focus,.btn-cta-alt:active,.btn-success-alt.active,.active.btn-accept-alt,.btn-success-alt:focus,.btn-accept-alt:focus,.btn-success-alt:active,.btn-accept-alt:active{background-color:#bada7a;border-color:#516b1d;color:#fff}.btn-accept,.btn-accept-alt,.btn-cancel,.btn-cancel-alt,.btn-delete{font-family:FontAwesome;font-weight:normal;font-style:normal;text-decoration:inherit;-webkit-font-smoothing:antialiased;min-height:2.425em;overflow:hidden;padding-left:1.13333em;padding-right:1.13333em;position:relative;text-align:left;text-indent:-9999px;width:0;white-space:nowrap}.btn-accept:after,.btn-accept-alt:after,.btn-cancel:after,.btn-cancel-alt:after,.btn-delete:after{content:'\f00c';display:block;font-size:1.75em;height:100%;left:0;line-height:0;position:absolute;speak:none;text-align:center;text-indent:0;top:50%;width:100%;-webkit-transition-delay:0.2s;-moz-transition-delay:0.2s;-o-transition-delay:0.2s;transition-delay:0.2s}.btn-accept:before,.btn-accept-alt:before,.btn-cancel:before,.btn-cancel-alt:before,.btn-delete:before,.btn-accept:after,.btn-accept-alt:after,.btn-cancel:after,.btn-cancel-alt:after,.btn-delete:after{-webkit-transition:opacity 0.2s ease;-moz-transition:opacity 0.2s ease;-o-transition:opacity 0.2s ease;transition:opacity 0.2s 
ease}.btn-cta-alt,.btn-success-alt,.btn-accept-alt{background-color:transparent;border-color:#9bca3e;color:#9bca3e}.btn-secondary,.btn-delete{background-color:#ededed;border-color:transparent;color:#7e7e7e}.btn-secondary:hover,.btn-delete:hover{background-color:#ededed;border-color:#7e7e7e;color:#333}.btn-secondary.active,.active.btn-delete,.btn-secondary:focus,.btn-delete:focus,.btn-secondary:active,.btn-delete:active{background-color:#ededed;border-color:#585858;color:#0d0d0d}.btn-danger,.btn-cancel,.btn-cancel-alt,.btn-important,.btn-error,.btn-danger-alt,.btn-cancel-alt,.btn-important-alt,.btn-error-alt{background-color:#bd2426;border-color:transparent;color:#fff}.btn-danger:hover,.btn-cancel:hover,.btn-cancel-alt:hover,.btn-important:hover,.btn-error:hover,.btn-danger-alt:hover,.btn-cancel-alt:hover,.btn-important-alt:hover,.btn-error-alt:hover{background-color:#de5052;border-color:#bd2426;color:#fff}.btn-danger.active,.active.btn-cancel,.active.btn-cancel-alt,.btn-danger:focus,.btn-cancel:focus,.btn-cancel-alt:focus,.btn-danger:active,.btn-cancel:active,.btn-cancel-alt:active,.btn-important.active,.btn-important:focus,.btn-important:active,.btn-error.active,.btn-error:focus,.btn-error:active,.btn-danger-alt.active,.active.btn-cancel-alt,.btn-danger-alt:focus,.btn-cancel-alt:focus,.btn-danger-alt:active,.btn-cancel-alt:active,.btn-important-alt.active,.btn-important-alt:focus,.btn-important-alt:active,.btn-error-alt.active,.btn-error-alt:focus,.btn-error-alt:active{background-color:#de5052;border-color:#521010;color:#fff}.btn-danger-alt,.btn-cancel-alt,.btn-important-alt,.btn-error-alt{background-color:transparent;border-color:#bd2426;color:#bd2426}.btn-warning,.btn-warning-alt{background-color:#f68b1f;border-color:transparent;color:#fff}.btn-warning:hover,.btn-warning-alt:hover{background-color:#f9b169;border-color:#f68b1f;color:#fff}.btn-warning.active,.btn-warning:focus,.btn-warning:active,.btn-warning-alt.active,.btn-warning-alt:focus,.btn-warning-alt:activ
e{background-color:#f9b169;border-color:#904b06;color:#fff}.btn-warning-alt{background-color:transparent;border-color:#f68b1f;color:#f68b1f}.btn-link{background-color:transparent;border-color:transparent;color:#2f7bbf}.btn-cancel:after,.btn-cancel-alt:after{content:'\f00d'}.btn-cancel-alt{border-color:#dedede;color:#dedede}.btn-delete:after{content:'\f014'}.btn.disabled,.btn.loading,button[disabled],input.btn[disabled]{cursor:default;background-color:#ededed;border-color:transparent;color:#a1a1a1;filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=50);opacity:0.5}.btn.disabled:hover,.btn.loading:hover,.input-picker .picker-list td button.loading.checked-value,.input-picker .picker-list td button.disabled.checked-value,.btn.disabled:focus,.btn.loading:focus,.btn.disabled:active,.btn.loading:active,button[disabled]:hover,button[disabled]:focus,button[disabled]:active,input.btn[disabled]:hover,input.btn[disabled]:focus,input.btn[disabled]:active{background-color:#ededed;border-color:transparent;color:#a1a1a1}.btn.loading{filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=80);opacity:0.8;overflow:hidden;position:relative}.btn.loading:before{background:transparent url('../images/spinner.gif') no-repeat center;content:'';display:block;height:100%;left:0;position:absolute;top:0;width:100%;filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=100);opacity:1;-webkit-transform:scale(1, 1);-moz-transform:scale(1, 1);-ms-transform:scale(1, 1);-o-transform:scale(1, 1);transform:scale(1, 1);-webkit-animation:fadeInZoom 0.2s ease-in-out;-moz-animation:fadeInZoom 0.2s ease-in-out;-ms-animation:fadeInZoom 0.2s ease-in-out;-o-animation:fadeInZoom 0.2s ease-in-out;animation:fadeInZoom 0.2s ease-in-out}.btn.loading,.btn.loading:after,.btn.loading:hover,.input-picker .picker-list td button.loading.checked-value{color:#ededed}.btn.loading:after{filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=0);opacity:0}@-webkit-keyframes 
fadeInZoom{0%{opacity:0;-webkit-transform:scale(0)}100%{opacity:1;-webkit-transform:scale(1)}}@-moz-keyframes fadeInZoom{0%{opacity:0;-moz-transform:scale(0)}100%{opacity:1;-moz-transform:scale(1)}}@-ms-keyframes fadeInZoom{0%{opacity:0;-ms-transform:scale(0)}100%{opacity:1;-ms-transform:scale(1)}}@keyframes fadeInZoom{0%{opacity:0;transform:scale(0)}100%{opacity:1;transform:scale(1)}}input[type="button"].btn,input[type="submit"].btn{height:auto;padding:0.53333em 1.33333em 0.6em}.btn-single-line{max-width:100%;overflow:hidden;padding-bottom:0.6em;position:relative;text-overflow:ellipsis;}.btn-single-line.btn-wicons{padding-left:2em;padding-right:2em}.btn-single-line .icon,.btn-single-line .caret{line-height:1.5;margin-top:-0.6em;position:absolute;top:50%}.btn-single-line .caret{margin-left:0;margin-top:0;right:0.8em}.btn-single-line .icon:first-child{left:0.6em}.btn-single-line .icon:last-child{right:0.6em}.btn+.btn,.btn+.ui-item{margin-left:0.4rem}.btn-block{display:block;margin-bottom:0.4rem;text-align:center}.btn-block+.btn-block{margin-left:auto}.btn a{color:#333}.btn-std a,.btn-primary a{color:#fff}.btn-error a,.btn-danger a,.btn-cancel a,.btn-cancel-alt a,.btn-important a{color:#fff}.btn-cta a,.btn-success a,.btn-accept a,.btn-accept-alt a,.btn-cancel a,.btn-cancel-alt a,.btn-delete a{color:#fff}legend{display:block;margin-bottom:1.5rem;width:100%}input,select,textarea,.switch{background:#fff;border:1px solid #b1b1b1;color:#333;font-family:"Open Sans",Helvetica,Arial,sans-serif;font-size:0.86667em;line-height:1.24;margin:0 0 0.75em;max-width:100%;outline:none;padding:0.45em 0.75em;vertical-align:middle;display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline;-webkit-transition:all 0.2s ease;-moz-transition:all 0.2s ease;-o-transition:all 0.2s ease;transition:all 0.2s 
ease;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}input:hover,select:hover,textarea:hover,.switch:hover{border-color:#989898}input:focus,select:focus,textarea:focus,.switch:focus{border-color:#2f7bbf;outline:none;-webkit-box-shadow:0 0 8px rgba(47,123,191,0.5);-moz-box-shadow:0 0 8px rgba(47,123,191,0.5);box-shadow:0 0 8px rgba(47,123,191,0.5)}input.readonly,input.disabled,input[disabled],input[readonly],select.readonly,select.disabled,select[disabled],select[readonly],textarea.readonly,textarea.disabled,textarea[disabled],textarea[readonly],.switch.readonly,.switch.disabled,.switch[disabled],.switch[readonly]{background-color:#f7f7f7;border-color:#cbcbcb;color:#7e7e7e;cursor:not-allowed}select{position:relative;-webkit-appearance:none;-moz-appearance:none;appearance:none}input.ui-state-error,input.ui-state-invalid,input.user-error{border-color:#bd2426}input,select{height:2.26667rem}select[size],select[multiple]{height:auto}input[type="radio"],input[type="checkbox"]{height:16px;line-height:normal;margin:1px 0 0;padding:0.45em;position:relative;width:16px;-webkit-appearance:none;-moz-appearance:none;appearance:none}input[type="radio"]:before,input[type="checkbox"]:before{background-color:transparent;color:transparent;content:'';position:absolute;-webkit-transition:all 0.15s ease-out;-moz-transition:all 0.15s ease-out;-o-transition:all 0.15s ease-out;transition:all 0.15s 
ease-out}input[type="radio"].ui-state-valid,input[type="radio"].user-success,input[type="checkbox"].ui-state-valid,input[type="checkbox"].user-success{border-color:#b1b1b1}input[type="radio"],input[type="radio"]:before{-webkit-border-radius:50%;-moz-border-radius:50%;-ms-border-radius:50%;-o-border-radius:50%;border-radius:50%}input[type="radio"]:before{height:20%;left:40%;top:40%;width:20%}input[type="radio"]:checked:before{background-color:#333;height:60%;left:20%;top:20%;width:60%}input[type="checkbox"]:before{content:'\f00c';font-family:FontAwesome;font-size:1.25em;left:-0.06667em;top:-0.2em}input[type="checkbox"]:checked:before{color:#333}label{display:block;font-size:0.86667rem;margin-bottom:0.38333em}.radio,.checkbox{min-height:1rem;padding-left:2em}.radio input[type="radio"],.checkbox input[type="checkbox"]{float:left;margin-left:-2em;margin-top:0.26667em}.radio.inline,.checkbox.inline{display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline;margin-bottom:0;padding-top:0.13333em;vertical-align:middle}.radio.inline+.inline,.checkbox.inline+.inline{margin-left:0.4rem}.input-mini{width:4rem}.input-small{width:9.4rem}.input-medium{width:17.8rem}.input-large{width:26.2rem}.input-xlarge{width:34.6rem}.input-xxlarge{width:43rem}.file{border:1px solid #b1b1b1;margin-bottom:1em;position:relative;padding:0;width:24rem;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}.file input{border:0;margin-bottom:0;padding:0;width:10em;filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=0);opacity:0}.file:before{background-color:#2f7bbf;color:#fff;content:'Choose File';height:100%;left:0;padding:0.53333rem 1.33333rem;position:absolute;top:0}.switch,.proxy{background-color:#fff;border:1px solid #b1b1b1;color:#fff;cursor:pointer;font-size:0;height:2.26667rem;overflow:hidden;margin:0;padding:0;position:relative;width:5.334rem}.switch input,.proxy 
input{position:absolute;-webkit-user-select:none;-moz-user-select:none;user-select:none;-webkit-appearance:checkbox-container;-moz-appearance:checkbox-container;appearance:checkbox-container}.switch input,.switch input.user-success,.proxy input,.proxy input.user-success{background:transparent;border-color:transparent}.switch input:checked:before,.proxy input:checked:before{content:''}.switch input:focus,.proxy input:focus{outline:none;-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none}.switch:after,.switch:before{background-color:#9bca3e;color:#fff;content:'On';font-size:0.86667rem;line-height:1.5;height:100%;left:0;padding:0.45rem 0;position:absolute;text-align:center;top:0;width:51%}.switch:before{background-color:#b1b1b1;content:'Off';left:auto;right:0;width:50%}.switch .knob{background:#f7f7f7;border:1px solid #b1b1b1;border-bottom:none;border-top:none;display:block;font-size:0.86667rem;height:100%;left:-1px;top:0;width:2.667rem;z-index:2;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px;-webkit-transition:all 0.15s linear;-moz-transition:all 0.15s linear;-o-transition:all 0.15s linear;transition:all 0.15s linear;position:relative}.switch .knob:before,.switch .knob:after{border:4px solid transparent;border-left-color:inherit;content:'';display:block;height:0;left:50%;margin-left:2px;margin-top:-3px;position:absolute;top:50%;width:0}.switch .knob:before{border-left-color:transparent;border-right-color:inherit;margin-left:-10px}.switch input:checked+.knob{left:50%}.proxy{background:transparent;border:0;display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline;height:34px;width:55px}.proxy .cloud{border:1px solid transparent;display:block;height:100%;left:0;position:absolute;top:0;width:100%}.proxy .cloud:before,.proxy .cloud:after{background:transparent url('../images/cloudflare-sprite-small.png') 0 -120px 
no-repeat;content:'';display:block;height:100%;left:0;opacity:1;position:absolute;top:0;width:100%;-webkit-transition:opacity,0.15s ease;-moz-transition:opacity,0.15s ease;-o-transition:opacity,0.15s ease;transition:opacity,0.15s ease}.proxy .cloud:after{opacity:0;background-position:0 -188px}.proxy input:checked+.cloud:before{opacity:0}.proxy input:checked+.cloud:after{opacity:1}.proxy input:focus+.cloud{border-color:#dedede}.control-group{padding:0.75em 0;position:relative;width:100%}.control-group input,.control-group select{margin-bottom:0}.control-group.info input,.control-group.info select,.control-group.info .select2-container,.control-group.info .select2-choice,.control-group.info .select2-choice div{border-color:#2f7bbf;color:#333}.control-group.info label,.control-group.info .control-label{color:#2f7bbf}.control-group.info .help-inline,.control-group.info .help-block{color:#2f7bbf}.control-group.error input,.control-group.error select,.control-group.error .select2-container,.control-group.error .select2-choice,.control-group.error .select2-choice div{border-color:#bd2426;color:#521010}.control-group.error label,.control-group.error .control-label{color:#bd2426}.control-group.error .help-inline,.control-group.error .help-block{color:#bd2426}.control-group.success input,.control-group.success select,.control-group.success .select2-container,.control-group.success .select2-choice,.control-group.success .select2-choice div{border-color:#9bca3e;color:#333}.control-group.success label,.control-group.success .control-label{color:#516b1d}.control-group.success .help-inline,.control-group.success .help-block{color:#516b1d}.control-group.warning input,.control-group.warning select,.control-group.warning .select2-container,.control-group.warning .select2-choice,.control-group.warning .select2-choice div{border-color:#f68b1f;color:#904b06}.control-group.warning label,.control-group.warning .control-label{color:#f68b1f}.control-group.warning 
.help-inline,.control-group.warning .help-block{color:#f68b1f}.controls input,.controls select,.controls textarea{max-width:100%}.controls .radio:only-child,.controls .checkbox:only-child{margin-bottom:0}.controls label,.control-label label{line-height:1.3}.input-stacked input,.input-stacked select,.input-stacked textarea,.input-stacked .select2-container{display:block;margin-bottom:0}.input-stacked input ~ input,.input-stacked input ~ select,.input-stacked input ~ .select2-container,.input-stacked select ~ input,.input-stacked select ~ select,.input-stacked select ~ .select2-container,.input-stacked textarea ~ input,.input-stacked textarea ~ select,.input-stacked textarea ~ .select2-container,.input-stacked .select2-container ~ input,.input-stacked .select2-container ~ select,.input-stacked .select2-container ~ .select2-container{margin-top:0.4rem}.input-prepend,.input-append{font-size:0;margin:0 0 1rem;vertical-align:middle;white-space:nowrap;display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline}.input-prepend input,.input-prepend select,.input-append input,.input-append select{font-size:0.86667rem;margin:0;position:relative;vertical-align:top;-webkit-border-radius:0 2px 2px 0;-moz-border-radius:0 2px 2px 0;-ms-border-radius:0 2px 2px 0;-o-border-radius:0 2px 2px 0;border-radius:0 2px 2px 0}.input-prepend .btn,.input-prepend .add-on,.input-append .btn,.input-append .add-on{border-color:#b1b1b1;font-size:0.86667rem;line-height:1.24;height:2.26667rem;margin:0 -1px 0 0;min-width:1.06667em;position:relative;text-align:center;width:auto;display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline;-webkit-border-radius:0;-moz-border-radius:0;-ms-border-radius:0;-o-border-radius:0;border-radius:0}.input-prepend .btn:first-child,.input-prepend .add-on:first-child,.input-append .btn:first-child,.input-append .add-on:first-child{-webkit-border-radius:2px 0 0 
2px;-moz-border-radius:2px 0 0 2px;-ms-border-radius:2px 0 0 2px;-o-border-radius:2px 0 0 2px;border-radius:2px 0 0 2px}.input-prepend .btn:last-child,.input-prepend .add-on:last-child,.input-append .btn:last-child,.input-append .add-on:last-child{-webkit-border-radius:0 2px 2px 0;-moz-border-radius:0 2px 2px 0;-ms-border-radius:0 2px 2px 0;-o-border-radius:0 2px 2px 0;border-radius:0 2px 2px 0}.input-prepend .add-on,.input-append .add-on{background-color:#dedede;border:1px solid;color:#7e7e7e;padding:0.53333rem 0.66667rem}.input-prepend input:hover,.input-prepend input:active,.input-prepend input:focus,.input-prepend select:hover,.input-prepend select:active,.input-prepend select:focus,.input-prepend .add-on:hover,.input-prepend .add-on:active,.input-prepend .add-on:focus,.input-append input:hover,.input-append input:active,.input-append input:focus,.input-append select:hover,.input-append select:active,.input-append select:focus,.input-append .add-on:hover,.input-append .add-on:active,.input-append .add-on:focus{z-index:5}.input-append input,.input-append select{-webkit-border-radius:2px 0 0 2px;-moz-border-radius:2px 0 0 2px;-ms-border-radius:2px 0 0 2px;-o-border-radius:2px 0 0 2px;border-radius:2px 0 0 2px}.input-append .btn,.input-append .add-on{margin-left:-1px;margin-right:0}.input-prepend.input-append input,.input-prepend.input-append select{-webkit-border-radius:0;-moz-border-radius:0;-ms-border-radius:0;-o-border-radius:0;border-radius:0}.input-prepend.input-append .btn,.input-prepend.input-append .add-on{margin-left:-1px;margin-right:0}.input-prepend.input-append .btn:first-child,.input-prepend.input-append .add-on:first-child{margin-left:0;margin-right:-1px}.form-stacked .control-group>input[name]:only-of-type,.form-stacked .control-group>select:only-of-type,.form-stacked .control-group>.select2-container:only-of-type{display:block;width:100%}.form-stacked input[type="checkbox"],.form-stacked input[type="button"],.form-stacked 
input[type="submit"]{display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline;width:auto}.form-inline .btn,.form-inline button,.form-inline label,.form-inline input,.form-inline select,.form-inline .help-inline{display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline;margin-bottom:0;margin-left:0.4rem;vertical-align:middle}.form-inline .btn:first-child,.form-inline button:first-child,.form-inline label:first-child,.form-inline input:first-child,.form-inline select:first-child,.form-inline .help-inline:first-child{margin-left:0}.form-inline input[type="radio"],.form-inline input[type="checkbox"]{float:none;margin:0 0.2em 0 0}.form-inline .radio,.form-inline .checkbox{padding-left:0}.ui-search{font-size:1rem;position:relative}.ui-search input{padding-left:2.5em}.ui-search .icon-search,.ui-search .clear-icon{font-size:1.08333em;line-height:1.3;padding:0.45em 0.75em;position:absolute}.ui-search .icon-search{color:#4d4d4d;left:0;position:absolute;top:0}.ui-search .clear-icon{color:#e4e4e4;cursor:pointer;display:none;top:0;right:0}.ui-search .clear-icon:hover{color:#989898}.ui-search .clear-icon:active{color:#7e7e7e}.help-inline,.help-block{font-size:0.86667rem}.help-inline:empty,.help-block:empty{display:none}.help-inline{display:inline;padding:0 0.5em}.help-block{display:block;margin:0 0 1em}.input-assist{font-size:0.8rem;line-height:2.26667rem;position:absolute;right:0.75em;top:0;-webkit-user-select:none;-moz-user-select:none;user-select:none}input ~ .alert,select ~ .alert,form .alert{margin-top:0.5em}.input-alert{position:relative}.input-alert:after{border:10px solid transparent;border-bottom-color:inherit;content:'';display:block;height:0;left:50%;margin-left:-10px;position:absolute;top:-20px;width:0;-webkit-filter:drop-shadow(0 -1px 0px rgba(0,0,0,0.6));-moz-filter:drop-shadow(0 -1px 0px rgba(0,0,0,0.6));filter:drop-shadow(0 -1px 0px 
rgba(0,0,0,0.6))}.input-alert.alert-error:after{border-bottom-color:#de5052}.input-alert.alert-success:after{border-bottom-color:#bada7a}.input-alert.alert-warning:after{border-bottom-color:#f9b169}.input-alert.alert-info:after{border-bottom-color:#62a1d8}.flexbox .input-prepend,.flexbox .input-append{display:-webkit-box;display:-moz-box;display:-ms-box;display:box}.flexbox .input-prepend input,.flexbox .input-prepend select,.flexbox .input-append input,.flexbox .input-append select{display:block;-webkit-box-flex:1;-moz-box-flex:1;-ms-box-flex:1;box-flex:1}.flexbox .input-prepend .btn,.flexbox .input-prepend .add-on,.flexbox .input-append .btn,.flexbox .input-append .add-on{display:block;-webkit-box-flex:0;-moz-box-flex:0;-ms-box-flex:0;box-flex:0}.ws-range,.ws-range *,.placeholder-box,.placeholder-text,.input-datetime-local,.input-buttons,.input-buttons *,.details-open-indicator,.ws-input-seperator,progress span.progress-value{margin:0;padding:0;border:none;width:auto;background:transparent none}output{position:relative}.placeholder-box{position:relative;display:inline-block;zoom:1}.polyfill-important .placeholder-box{position:relative !important;display:inline-block !important;margin:0 !important;padding:0 !important;width:auto !important;height:auto !important}.placeholder-box-input{vertical-align:bottom}.placeholder-box-left{float:left}.placeholder-box-right{float:right}.placeholder-text{position:absolute;display:none;top:0;left:0;overflow:hidden;color:#999;line-height:1;cursor:text}.polyfill-important .placeholder-text{margin:0 !important;padding-right:0 !important;padding-bottom:0 !important;display:none !important}.placeholder-visible .placeholder-text,.placeholder-text.placeholder-visible{display:inline-block}.placeholder-box-input .placeholder-text{white-space:nowrap}.placeholder-visible{color:#999}.placeholder-focused.placeholder-visible{color:#ccc}.polyfill-important .placeholder-visible .placeholder-text,.polyfill-important 
.placeholder-text.placeholder-visible{display:inline-block !important}.has-input-buttons{display:inline-block}.polyfill-important .has-input-buttons{display:inline-block !important}.input-buttons,.step-controls,.ws-popover-opener{zoom:1;overflow:hidden;display:inline-block;font-size:0;vertical-align:middle;margin-left:-20px}.step-controls,.ws-popover-opener{position:relative;float:left;margin:0;height:19px;width:15px}.ws-popover-opener{cursor:pointer;overflow:visible;margin:0;position:relative;width:20px;zoom:1}.ws-popover-opener:hover{background:none;border-color:transparent;color:#989898}.ws-popover-opener:before{content:'\f073';font-family:FontAwesome;font-size:15px}.ws-popover-opener span{display:none}.polyfill-important .input-buttons{display:inline-block !important;padding:0 !important;vertical-align:middle !important}.input-buttons.input-button-size-1.month-input-buttons,.input-buttons.input-button-size-1.date-input-buttons{margin-left:-24px}.input-buttons.input-button-size-2{margin-left:-39px}.input-buttons.ws-disabled{opacity:0.95}.input-buttons.ws-disabled *,.input-buttons.ws-readonly *{cursor:default}.step-controls span{border:4px solid transparent;position:absolute;display:inline-block;left:3px;overflow:hidden;margin:0 !important;padding:0 !important;cursor:pointer;font-size:0;line-height:0;height:0;width:0}.step-controls span:hover{border-bottom-color:#989898}.step-controls span.mousepress-ui{border-bottom-color:#2f7bbf}.ws-disabled .step-controls span{border-bottom-color:#cbcbcb}.polyfill-important .step-controls span{display:inline-block !important;margin:0 !important;padding:0 !important;font-size:0 !important}.step-controls span.step-up{border-bottom-color:#b1b1b1;top:0}.step-controls span.step-down{border-top-color:#b1b1b1;top:12px}.ws-input{letter-spacing:-0.31em;word-spacing:-0.43em}.ws-input>*{text-align:center;letter-spacing:normal;word-spacing:normal}.ws-input 
.ws-input-seperator{vertical-align:middle;width:2%;overflow:hidden}.ws-input+.input-buttons{margin-left:2px}.ws-input input,.ws-input .ws-input-seperator{text-align:center;display:inline-block}.polyfill-important .ws-input input,.polyfill-important .ws-input .ws-input-seperator{display:inline-block !important}.ws-date .mm,.ws-date .dd{width:23.5%}.no-boxsizing .ws-date .mm,.no-boxsizing .ws-date .dd{width:16%}.ws-date .yy{width:48%}.no-boxsizing .ws-date .yy{width:40%}.ws-month .mm,.ws-month .yy{width:47.9%}.no-boxsizing .ws-month .mm,.no-boxsizing .ws-month .yy{width:41%}.ws-range{position:relative;display:inline-block;vertical-align:middle;margin:0;zoom:1;height:1px;width:155px;border-radius:1px;cursor:pointer;font-size:0;line-height:0;top:12px}.ws-range:focus{outline:none}.polyfill-important .ws-range{display:inline-block !important;padding:0 !important;font-size:0 !important}.ws-range .ws-range-thumb{background-color:#7e7e7e;border:1px solid #333;top:-1px;position:absolute;display:block;z-index:4;overflow:hidden;margin:0 0 0 -7px;height:14px;width:14px;cursor:pointer;outline:none;font-size:0;line-height:0;-webkit-border-radius:50%;-moz-border-radius:50%;-ms-border-radius:50%;-o-border-radius:50%;border-radius:50%}.ws-range .ws-range-thumb:hover{border-color:#7e7e7e}.ws-range .ws-range-thumb:active{background-color:#333}.ws-range.ws-focus .ws-range-thumb{background-position:-20px 1px}.ws-range.ws-active .ws-range-thumb{background-position:-37px 1px}.ws-range[aria-disabled="true"],.ws-range[aria-readonly="true"]{cursor:default;opacity:0.95}.ws-range[aria-disabled="true"] .ws-range-thumb,.ws-range[aria-readonly="true"] .ws-range-thumb{cursor:default}.ws-range[aria-disabled="true"] .ws-range-thumb{background-position:-54px 1px}.ws-range .ws-range-rail{border-top:1px solid #b1b1b1;position:absolute;display:block;top:-10px;left:5px;right:5px;bottom:0;margin:0;zoom:1}.ws-range .ws-range-min{position:absolute !important;display:block;padding:0 
!important;top:-10px;height:1px;left:0;z-index:1;overflow:hidden;background:#f68b1f}.ws-range .ws-range-ticks{overflow:hidden;position:absolute;bottom:0px;left:0;height:4px;width:1px;margin:0 0 0 -1.5px;font-size:0;line-height:0;text-indent:-999px;background:#ccc}.ws-range.vertical-range .ws-range-thumb:hover,.ws-range.vertical-range.ws-focus .ws-range-thumb{background-position:0 -34px}.ws-range.vertical-range.ws-active .ws-range-thumb{background-position:0 -17px}.ws-range.vertical-range[aria-disabled="true"] .ws-range-thumb{background-position:0 0}.ws-range.vertical-range .ws-range-min{top:auto;bottom:1px;left:0;width:1px;height:0}.ws-range.vertical-range .ws-range-rail{top:5px;left:0;right:0;bottom:5px}.ws-range.vertical-range .ws-range-ticks{bottom:auto;left:auto;right:0;height:1px;width:4px}.ws-popover{display:block;visibility:hidden;overflow:hidden;position:absolute;top:0;left:0;padding:0 6px;margin:0 0 0 -6px;z-index:1600;min-width:90px;transition:visibility 400ms ease-in-out}.ws-popover button{display:inline-block;overflow:visible;position:relative;margin:0;border:0;padding:0;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:none;appearance:none;box-sizing:content-box;font-family:arial, sans-serif;background:transparent;cursor:pointer}.ws-popover button::-moz-focus-inner{border:0;padding:0}.ws-popover button[disabled]{cursor:default;color:#888}.ws-popover.ws-po-visible{visibility:visible}.ws-po-outerbox{position:relative;opacity:0;padding:11px 0 4px;-webkit-transition:all 250ms ease-in-out;-moz-transition:all 250ms ease-in-out;-o-transition:all 250ms ease-in-out;transition:all 250ms ease-in-out}.ws-popover.ws-po-visible .ws-po-outerbox{opacity:1}.ws-po-box{border:1px solid #dedede;background:#fff;padding:0.5rem 1rem;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}.ws-po-arrow{position:absolute;top:4px;left:20px;display:block;width:0;height:0;border-left:9px solid 
transparent;border-right:9px solid transparent;border-bottom:7px solid #ccc;border-top:none;zoom:1;font-size:0}html .ws-po-arrow{border-left-color:transparent;border-right-color:transparent}html .ws-po-arrow .ws-po-arrowbox{border-left-color:transparent;border-right-color:transparent}.polyfill-important .ws-po-arrow{border-left-color:transparent !important;border-right-color:transparent !important}.polyfill-important .ws-po-arrow .ws-po-arrowbox{border-left-color:transparent !important;border-right-color:transparent !important}* html .ws-po-arrow{display:none}.ws-po-arrow .ws-po-arrowbox{position:relative;top:1px;left:-9px;display:block;width:0;height:0;border-left:9px solid transparent;border-right:9px solid transparent;border-bottom:7px solid #fefefe;border-top:none;z-index:999999999}.validity-alert{display:inline-block;font-size:0.86667rem;margin:0;padding:0;z-index:1000000000}.validity-alert .ws-po-outerbox{padding:6px 0 0}.validity-alert .ws-po-box{background-color:#de5052;border:1px solid #521010;color:#fff}.validity-alert .ws-po-arrow{border-bottom-color:#521010;top:0}.validity-alert .ws-po-arrow .ws-po-arrowbox{border-bottom-color:#de5052}.input-picker{outline:none;text-align:center;font-family:sans-serif;width:300px}.input-picker.ws-size-2{width:538px}.input-picker.ws-size-3{width:796px}.input-picker abbr[title]{cursor:help}.input-picker li,.input-picker button{font-size:13px;line-height:16px;color:#000;transition:all 400ms}.input-picker .ws-focus,.input-picker :focus{outline:1px solid #2f7bbf}.input-picker .ws-po-box{position:relative;padding:0;box-shadow:0 0 6px rgba(0,0,0,0.1);-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}.input-picker .ws-prev,.input-picker .ws-next{position:absolute;top:0;padding:0;width:40px;height:40px;right:0;z-index:1}.input-picker .ws-prev:after,.input-picker .ws-next:after{border:6px solid 
transparent;border-left-color:#333;content:'';left:50%;margin-left:-3px;margin-top:-6px;position:absolute;top:50%}.input-picker .ws-prev span,.input-picker .ws-next span{display:none}.input-picker .ws-picker-body{position:relative;padding:40px 0 0;zoom:1}.input-picker .ws-prev{left:0;right:auto}.input-picker .ws-prev:after{border-left-color:transparent;border-right-color:#333;margin-left:-10px}.input-picker .ws-button-row{position:relative;margin:10px 0 0;border-top:1px solid #dedede;text-align:left;z-index:2}.input-picker .ws-button-row button{padding:10px}.input-picker .ws-button-row button.ws-empty{float:right}.input-picker[data-currentview="setMonthList"] .ws-picker-header select{max-width:95%}.input-picker[data-currentview="setDayList"] .ws-picker-header select{max-width:40%}.input-picker[data-currentview="setDayList"] .ws-picker-header select.month-select{max-width:55%}.input-picker .ws-picker-header{position:absolute;top:-30px;right:0;left:0;margin:0 40px}.input-picker .ws-picker-header button{display:inline-block;width:100%;margin:0;padding:4px 0;font-weight:700}.input-picker .ws-picker-header button:hover{text-decoration:underline}.input-picker .ws-picker-header button[disabled]:hover{text-decoration:none}.input-picker .picker-grid{position:relative;zoom:1;overflow:hidden}.input-picker.ws-size-1 .picker-list{float:none;width:auto}.input-picker .picker-list{position:relative;zoom:1;width:238px;float:left;margin:0 10px}.input-picker .picker-list tr{border:0}.input-picker .picker-list th,.input-picker .picker-list td{padding:3px 5px}.input-picker .picker-list.day-list td{padding:2px 1px}.input-picker .picker-list td button{display:block;width:100%}.input-picker .picker-list td button.othermonth{color:#7e7e7e}.input-picker .picker-list table{width:100%;border:0 none;border-collapse:collapse}.input-picker .picker-list th,.input-picker .picker-list 
td.week-cell{font-size:13px;line-height:1.1em;padding-bottom:3px;text-transform:uppercase;font-weight:700}.input-picker .picker-list th,.input-picker .picker-list td{width:14.2856%}.input-picker .ws-options{margin:10px 0 0;border-top:1px solid #dedede;padding:10px 0 0;text-align:left}.input-picker .ws-options h5{margin:0 0 5px;padding:0;font-size:14px;font-weight:bold}.input-picker .ws-options ul,.input-picker .ws-options li{padding:0;margin:0;list-style:none}.input-picker .ws-options button{display:block;padding:2px 0;width:100%;text-align:left}.input-picker .ws-options button.ws-focus,.input-picker .ws-options button:focus,.input-picker .ws-options button:hover{text-decoration:underline}.input-picker .ws-options button[disabled],.input-picker .ws-options button[disabled].ws-focus,.input-picker .ws-options button[disabled]:focus,.input-picker .ws-options button[disabled]:hover{color:#7e7e7e;text-decoration:none}datalist{display:none}.datalist-polyfill{position:absolute !important;font-size:100%}.datalist-polyfill .datalist-box{position:relative;max-height:200px;overflow:hidden;overflow-x:hidden !important;overflow-y:auto}.datalist-polyfill .ws-po-box{padding:0}.datalist-polyfill ul,.datalist-polyfill li{font-size:100%;list-style:none !important}.datalist-polyfill ul{position:static !important;overflow:hidden;margin:0;padding:0;height:auto !important;background-color:#fff;color:#333}.datalist-polyfill li{margin:0;padding:0.25em 0.5em;overflow:hidden;white-space:nowrap;cursor:default;zoom:1;overflow:hidden;text-overflow:ellipsis;background-color:#fff;transition:background-color 250ms}.datalist-polyfill mark{font-weight:normal;font-style:normal}.datalist-polyfill .option-value{display:inline-block;text-overflow:ellipsis;max-width:100%;color:#333;float:left;transition:color 250ms}.datalist-polyfill 
.option-label{display:none;max-width:100%;float:right;font-size:90%;color:#7e7e7e;text-overflow:ellipsis;vertical-align:bottom;margin-top:0.15em;margin-left:10px;text-align:right;transition:color 400ms}.datalist-polyfill .has-option-label .option-label{display:inline-block}.datalist-polyfill .hidden-item{display:none !important}.datalist-polyfill .active-item{background-color:#2f7bbf;cursor:default}.datalist-polyfill .active-item .option-value{color:#fff}.datalist-polyfill .active-item .option-label{color:#dedede}progress{border:0;display:inline-block;height:12px;position:relative;width:auto;-webkit-appearance:none;-moz-appearance:none;appearance:none}progress[data-position]{background:#f5f5f5;border:none;vertical-align:-0.2em}progress>*{display:none}progress span.progress-value{background:#2f7bbf;display:block !important;top:0;left:0;bottom:0;height:100%;position:absolute}progress[aria-valuenow] span.progress-value{background:#2f7bbf}progress:indeterminate{background-color:#f68b1f;color:#f68b1f}@media screen and (min-width: 49.2em){form .columns.two>.column,form .columns.cols-2>.column,form .columns.four>.column,form .columns.cols-4>.column{padding-left:0;padding-right:0.45714em;width:50%}form .columns.two>.column:nth-child(even),form .columns.cols-2>.column:nth-child(even),form .columns.four>.column:nth-child(even),form .columns.cols-4>.column:nth-child(even){padding-left:0.45714em;padding-right:0}.form-horizontal legend{padding-left:30%;width:70%}.form-horizontal .control-label{color:#333;float:left;font-size:0.86667rem;margin-bottom:0;padding:0.6em 1.5rem 0.46667em 0;text-align:right;width:30%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.form-horizontal .control-label label{display:block}.form-horizontal .checkbox-label{padding-top:0}.form-horizontal .controls{margin-left:30%;width:70%}.form-horizontal .controls .columns{overflow:hidden}}code,pre{background-color:#e8e8e8;border:1px solid 
#dbdbdb;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}pre{color:#4d4d4d;display:block;font-family:monaco, courier, monospace;font-size:0.86667rem;margin:2rem 0;overflow:auto;padding:0.5rem;width:100%;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}code{color:#bd2426;margin-left:0.13333em;margin-right:0.13333em;padding:0 0.4em;vertical-align:baseline;display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline}.pagination .btn{background-color:transparent;border-color:transparent;color:#2f7bbf;padding-left:1em;padding-right:1em}.pagination .btn:hover,.pagination .input-picker .picker-list td button.checked-value,.input-picker .picker-list td .pagination button.checked-value{background-color:transparent;border-color:transparent;color:#f68b1f}.pagination .btn.active,.pagination .btn:active{background-color:transparent;border-color:transparent;color:#a1a1a1}.pagination .btn.inactive,.pagination .btn.disabled,.pagination .btn.loading,.pagination .btn[disabled]{background-color:transparent;border-color:transparent;color:#a1a1a1}.pagination .btn+.pagination-set,.pagination .pagination-set+.btn{margin-left:0.4rem}.pagination li+li{margin-left:0.4rem}.pagination .num-break{color:#a1a1a1;cursor:default;padding-left:0;padding-right:0}.pagination,.pagination ol,.pagination ul,.pagination li{list-style:none;margin:0;padding:0}.pagination ol,.pagination ul,.pagination li{display:inline;font-size:0}.caret{border:0.33333em solid transparent;border-top-color:inherit;content:"";height:0;width:0;vertical-align:top;display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline}.dropup,.dropdown{position:relative}.dropup .caret,.dropdown .caret{margin-top:0.25em;margin-left:0.13333em}.dropup 
.caret{border-top-color:transparent;border-bottom-color:inherit}.dropdown-toggle:active,.open .dropdown-toggle{outline:0}.dropdown-menu{background-color:#fff;border:1px solid #dedede;display:none;float:left;left:0;list-style:none;opacity:0;margin:5px 0 0;min-width:10.66667rem;padding:0.33333rem 0;position:absolute;top:102%;z-index:1040;*border-right-width:2px;*border-bottom-width:2px;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px;-webkit-box-shadow:0 3px 10px rgba(0,0,0,0.2);-moz-box-shadow:0 3px 10px rgba(0,0,0,0.2);box-shadow:0 3px 10px rgba(0,0,0,0.2);background-clip:padding-box;-webkit-animation:menuTransition 0.15s ease-out;-moz-animation:menuTransition 0.15s ease-out;-ms-animation:menuTransition 0.15s ease-out;-o-animation:menuTransition 0.15s ease-out;animation:menuTransition 0.15s ease-out}.dropdown-menu:before{border:10px solid transparent;border-bottom-color:#fff;content:'';left:1rem;height:0;position:absolute;top:-20px;width:0}.dropdown-menu.pull-right{right:0;left:auto}.dropdown-menu.pull-right:before{left:auto;right:1rem}.dropdown-menu .divider{background-color:#dedede;height:1px;margin:0.53333rem 0;overflow:hidden}.dropdown-menu li>a{clear:both;display:block;line-height:1.5;padding:0.2rem 1.06667rem;white-space:nowrap}.dropdown-menu li>a:hover,.dropdown-menu li>a:focus,.dropdown-menu li>a:active{color:#fff}.dropdown-menu li>a:hover{background-color:#2f7bbf}.dropdown-menu li>a:focus{background-color:#62a1d8}.dropdown-menu li>a:active{background-color:#c16508}.open .dropdown-menu{display:block;opacity:1}.menu-sidebar{list-style:none;margin:0;padding:0}.menu-sidebar li{background:#fff}.menu-sidebar li a{display:block}.menu-sidebar li.active>a,.menu-sidebar a:hover{background-color:#2f7bbf;color:#fff}.menu-sidebar li.active>a:after,.menu-sidebar 
a:hover:after{border-left-color:#fff}.menu-sidebar>li{background-clip:border-box}.menu-sidebar>li+li{margin-top:1px}.menu-sidebar>li:first-child,.menu-sidebar>li:first-child>a{-moz-border-radius-topleft:2px;-webkit-border-top-left-radius:2px;border-top-left-radius:2px;-moz-border-radius-topright:2px;-webkit-border-top-right-radius:2px;border-top-right-radius:2px}.menu-sidebar>li:last-child,.menu-sidebar>li:last-child>a{-moz-border-radius-bottomleft:2px;-webkit-border-bottom-left-radius:2px;border-bottom-left-radius:2px;-moz-border-radius-bottomright:2px;-webkit-border-bottom-right-radius:2px;border-bottom-right-radius:2px}.menu-sidebar>li>a{padding:1rem 2rem 1rem 1rem;position:relative}.menu-sidebar>li>a:after{border:0.4rem solid transparent;border-left-color:#2f7bbf;content:'';display:block;height:0;margin-top:-0.4rem;position:absolute;top:50%;right:0.75rem;width:0;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}.menu-sidebar>li.current-menu-ancestor>a:after{border-left-color:transparent;border-top-color:#fff;margin-top:-0.3rem}.menu-sidebar+.menu-sidebar{margin-top:1.5rem}.sub-menu{overflow:hidden;margin:0;padding:0}.sub-menu li:last-child{padding-bottom:0.75rem}.sub-menu a{padding:0.25rem;padding-left:2rem}.sub-menu li.active>a,.sub-menu a:hover{background-color:#3988ce}.sub-menu .sub-menu li:last-child{padding-bottom:0}.sub-menu .sub-menu a{padding-left:3rem}.sub-menu .sub-menu a:before{content:'\21B3';padding-right:0.26667rem}.js .menu-sidebar .sub-menu{display:none}.js .menu-sidebar .active>.sub-menu{display:block}.logo{background:transparent url('../images/cloudflare-sprite-small.png') 0 0 no-repeat;display:inline-block;overflow:hidden;text-indent:-9999em;height:60px;width:240px}.close{color:#7e7e7e;cursor:pointer;display:inline-block;font-size:2.3rem;float:right;height:1.5rem;line-height:0.6;overflow:hidden;position:relative;text-indent:200%;width:1.5rem;-webkit-transition:all 0.2s 
ease;-moz-transition:all 0.2s ease;-o-transition:all 0.2s ease;transition:all 0.2s ease}.close:hover{color:#656565}.close:before{content:'\00D7';left:0;height:100%;position:absolute;text-align:center;text-indent:0;top:0;width:100%}.cf-proxied,.cf-unproxied,.cf-unproxiable{background:transparent url('../images/cloudflare-sprite-small.png') no-repeat;cursor:pointer;overflow:hidden;padding:0;text-indent:200%;height:34px;width:55px;background-position:0 -188px;display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline}.cf-unproxied{background-position:0 -120px}.cf-unproxiable{background-position:0 -154px}@media (-webkit-min-device-pixel-ratio: 1.3), (-o-min-device-pixel-ratio: 2.6 / 2), (min--moz-device-pixel-ratio: 1.3), (min-device-pixel-ratio: 1.3), (min-resolution: 1.3dppx){.logo,.cf-proxied,.cf-unproxied{background-image:url('../images/cloudflare-sprite-small.png')}.logo{background-position:0 -122px;-webkit-background-size:100% auto;-moz-background-size:100% auto;-o-background-size:100% auto;background-size:100% auto}.cf-proxied{background-position:0 -380px;-webkit-background-size:55px,auto;-moz-background-size:55px,auto;-o-background-size:55px,auto;background-size:55px,auto}.cf-unproxied{background-position:0 -244px;-webkit-background-size:55px,auto;-moz-background-size:55px,auto;-o-background-size:55px,auto;background-size:55px,auto}.cf-unproxiable{background-position:0 -312px;-webkit-background-size:55px,auto;-moz-background-size:55px,auto;-o-background-size:55px,auto;background-size:55px,auto}}.header{background-color:#333;border-bottom:1px solid #1a1a1a;color:#fff;height:60px;margin:0;padding:0;position:relative;top:0;z-index:1000}.header-main .btn{font-size:0.93333rem}.logo-header{display:block;margin:0 auto;background-position:0 -60px}.header-navigation{display:none;font-size:0.93333rem}.header-navigation li{position:relative}.header-navigation li.btn{padding:0}.header-navigation 
a{color:#fff;display:block;padding:0.4rem 1rem}.header-navigation a:hover{background-color:#4d4d4d}.icon-menu{cursor:pointer;height:60px;left:0;overflow:hidden;position:absolute;text-indent:200%;top:0;width:60px;white-space:nowrap}.icon-menu:before{background:transparent url('../images/cloudflare-sprite-small.png') 0 -222px no-repeat;content:'';display:block;height:100%;left:50%;margin-left:-10px;margin-top:-10px;position:absolute;top:50%;width:100%}@media screen and (-webkit-min-device-pixel-ratio: 1.3), (-o-min-device-pixel-ratio: 2.6 / 2), (min--moz-device-pixel-ratio: 1.3), (min-device-pixel-ratio: 1.3), (min-resolution: 1.3dppx){.logo-header{}}@media screen and (min-width: 49.2em){.mobile-navigation{display:none}.header{background-color:#fff;border-bottom:0;border-top:3px solid #f68b1f;color:#333;height:auto;margin-bottom:2.66667rem}.header a:hover,.header li.active a{color:#333}.header .menu li:hover .sub-menu{display:block}.header-main{padding:0.5rem 0}.logo-header{background-position:0 0}.header-navigation{display:block;text-align:center;-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none}.header-navigation li{display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline}.header-navigation a{color:#2f7bbf;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}.header-navigation a:hover{background-color:transparent}.header-navigation .btn{color:#fff;padding:0.6em 1.33333em 0.53333em}.header-navigation .sub-menu{background-color:#fff;border:1px solid #dedede;border-top:0;display:none;left:0;min-width:100%;position:absolute;top:100%;width:13.33333rem;-webkit-border-radius:0 0 3px 3px;-moz-border-radius:0 0 3px 3px;-ms-border-radius:0 0 3px 3px;-o-border-radius:0 0 3px 3px;border-radius:0 0 3px 3px}.header-navigation .sub-menu li{display:block}.header-navigation .sub-menu li:last-child{padding-bottom:0}.header-navigation .sub-menu 
a{display:block;padding:0.53333em 0.8em;-webkit-border-radius:0;-moz-border-radius:0;-ms-border-radius:0;-o-border-radius:0;border-radius:0}.header-navigation .sub-menu a:hover{color:#fff}}@media screen and (min-width: 66em){.header{text-align:left}.logo-header{float:left}.header-navigation{float:right;line-height:60px;text-align:left}.header-navigation li{line-height:1.5;vertical-align:middle}}.footer{background-color:#fff;margin-top:1.33333rem;padding-bottom:2.33333rem;padding-top:2.33333rem}.footer-nav{font-size:0.86667rem}.footer-column{float:left;list-style:none;margin-left:1%;margin-right:1%;width:48%}.footer-column+.footer-column{margin-bottom:1rem}.footer-language-select{margin:0 auto 1.33333rem;width:13.6rem}.footer-language-select select,.footer-language-select .select2-container{width:100%}@media screen and (min-width: 49.2em){.footer{margin-top:2.66667rem}.footer-column{float:left;margin-left:0.5%;margin-right:0.5%;width:19%}.footer-column+.footer-column{margin-bottom:0}}@media screen and (min-width: 66em){.footer-language-select{float:left;margin-bottom:0}.footer-nav{float:left;width:47.2rem}}.modal-backdrop{background-color:#000;bottom:0;left:0;position:fixed;top:0;right:0;z-index:10;-webkit-transition:opacity 0.2s linear;-moz-transition:opacity 0.2s linear;-o-transition:opacity 0.2s linear;transition:opacity 0.2s linear}.modal-backdrop.fade{filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=0);opacity:0}.modal-backdrop,.modal-backdrop.fade.in{filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=70);opacity:0.7}.modal-container{display:block;min-height:100%;position:relative;width:100%}.modal{background-color:#fff;left:50%;margin:0 0 0 -15rem;max-width:95%;outline:none;position:fixed;top:10%;width:30rem;z-index:20;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px;-webkit-box-shadow:0 1px 15px rgba(0,0,0,0.75);-moz-box-shadow:0 1px 15px rgba(0,0,0,0.75);box-shadow:0 1px 15px 
rgba(0,0,0,0.75);-webkit-transition:opacity 0.25s linear;-moz-transition:opacity 0.25s linear;-o-transition:opacity 0.25s linear;transition:opacity 0.25s linear}.modal.fade{filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=0);opacity:0}.modal.fade.in,.modal.visible{filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=100);opacity:1}.modal-header,.modal-content,.modal-footer,.modal-body-section{padding:1.5rem;position:relative}.modal-header{padding:1.5rem}.modal-header ~ .modal-body,.modal-header ~ .modal-content,.modal-header ~ .modal-body-section,.modal-header ~ .modal-body .modal-content{padding-top:0}.modal-body+.modal-body,.modal-body+.modal-body-section,.modal-body-section+.modal-body,.modal-body-section+.modal-body-section{border-top:1px solid #f5f5f5;padding-top:1.5rem}.modal-close{position:absolute;right:1.5rem;top:1.5rem}.modal-title{font-weight:400}.modal-body{overflow-y:auto;max-height:100%}.modal-section{background-color:#ebebeb;border:1px solid #dedede;border-left:0;border-right:0}.modal-section .control-group{padding:1rem 1.5rem}.modal-section .control-group+.control-group{border-top:1px solid #dedede}.modal-footer{background-color:#f5f5f5}.footer-simple{background-color:transparent}.modal-actions{float:right}.modal-nonessential{line-height:2.2rem;vertical-align:middle}.modal-confirm .modal-footer{padding-top:0}body.modal-active{overflow:hidden}#overlays{height:0;left:0;overflow-y:auto;position:absolute;top:0;width:100%;z-index:1500}table{background-color:#fff;border-collapse:collapse;border-spacing:0;max-width:100%}thead{background-color:#dedede}thead,thead a{color:#333}thead a{cursor:pointer;display:block;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}thead input,thead select,thead textarea{border-color:#989898}thead td:first-child,thead th:first-child{-webkit-border-radius:2px 0 0 0;-moz-border-radius:2px 0 0 0;-ms-border-radius:2px 0 0 0;-o-border-radius:2px 0 0 0;border-radius:2px 0 0 0}thead td:last-child,thead 
th:last-child{-webkit-border-radius:0 2px 0 0;-moz-border-radius:0 2px 0 0;-ms-border-radius:0 2px 0 0;-o-border-radius:0 2px 0 0;border-radius:0 2px 0 0}thead.inverse{background-color:#7e7e7e}thead.inverse,thead.inverse a,thead.inverse .sort-caret{color:#fff}thead .sortable:hover{background-color:#d2d2d2}thead .sortable:hover,thead .sortable:hover a{color:#1a1a1a}.sort-caret{border:4px solid transparent;content:"";display:inline-block;height:0;margin-left:0.5em;width:0;vertical-align:middle}.ascending .sort-caret{border-bottom-color:#333;margin-top:-4px}.descending .sort-caret{border-top-color:#333;margin-top:4px}.table,.table-container{width:100%}.table{margin-bottom:1.5rem}.table th,.table td{border-top:1px solid #d2d2d2;line-height:1.5;padding:0.86667rem;vertical-align:middle}.table th{font-weight:600}.table thead th{vertical-align:bottom}.table caption+thead tr:first-child th,.table caption+thead tr:first-child td,.table colgroup+thead tr:first-child th,.table colgroup+thead tr:first-child td,.table thead:first-child tr:first-child th,.table thead:first-child tr:first-child td{border-top:0}.table tbody+tbody{border-top:2px solid #d2d2d2}.table-condensed th,.table-condensed td{padding:0.43333rem 0.93333rem}.table-bordered{border:1px solid #d2d2d2;border-collapse:separate;*border-collapse:collapse;border-left:0;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}.table-bordered th,.table-bordered td{border-left:1px solid #d2d2d2}.table-bordered caption+thead tr:first-child th,.table-bordered caption+tbody tr:first-child th,.table-bordered caption+tbody tr:first-child td,.table-bordered colgroup+thead tr:first-child th,.table-bordered colgroup+tbody tr:first-child th,.table-bordered colgroup+tbody tr:first-child td,.table-bordered thead:first-child tr:first-child th,.table-bordered tbody:first-child tr:first-child th,.table-bordered tbody:first-child tr:first-child td{border-top:0}.table-bordered 
thead:first-child tr:first-child>th:first-child,.table-bordered tbody:first-child tr:first-child>td:first-child,.table-bordered tbody:first-child tr:first-child>th:first-child{-webkit-border-radius:2px 0 0 0;-moz-border-radius:2px 0 0 0;-ms-border-radius:2px 0 0 0;-o-border-radius:2px 0 0 0;border-radius:2px 0 0 0}.table-bordered thead:first-child tr:first-child>th:last-child,.table-bordered tbody:first-child tr:first-child>td:last-child,.table-bordered tbody:first-child tr:first-child>th:last-child{-webkit-border-radius:0 2px 0 0;-moz-border-radius:0 2px 0 0;-ms-border-radius:0 2px 0 0;-o-border-radius:0 2px 0 0;border-radius:0 2px 0 0}.table-bordered thead:last-child tr:last-child>th:first-child,.table-bordered tbody:last-child tr:last-child>td:first-child,.table-bordered tbody:last-child tr:last-child>th:first-child,.table-bordered tfoot:last-child tr:last-child>td:first-child,.table-bordered tfoot:last-child tr:last-child>th:first-child{-webkit-border-radius:0 0 0 2px;-moz-border-radius:0 0 0 2px;-ms-border-radius:0 0 0 2px;-o-border-radius:0 0 0 2px;border-radius:0 0 0 2px}.table-bordered thead:last-child tr:last-child>th:last-child,.table-bordered tbody:last-child tr:last-child>td:last-child,.table-bordered tbody:last-child tr:last-child>th:last-child,.table-bordered tfoot:last-child tr:last-child>td:last-child,.table-bordered tfoot:last-child tr:last-child>th:last-child{-webkit-border-radius:0 0 2px 0;-moz-border-radius:0 0 2px 0;-ms-border-radius:0 0 2px 0;-o-border-radius:0 0 2px 0;border-radius:0 0 2px 0}.table-bordered tfoot+tbody:last-child tr:last-child td:first-child{-webkit-border-radius:0;-moz-border-radius:0;-ms-border-radius:0;-o-border-radius:0;border-radius:0}.table-bordered tfoot+tbody:last-child tr:last-child td:last-child{-webkit-border-radius:0;-moz-border-radius:0;-ms-border-radius:0;-o-border-radius:0;border-radius:0}.table-bordered caption+thead tr:first-child th:first-child,.table-bordered caption+tbody tr:first-child 
td:first-child,.table-bordered colgroup+thead tr:first-child th:first-child,.table-bordered colgroup+tbody tr:first-child td:first-child{-webkit-border-radius:2px 0 0 0;-moz-border-radius:2px 0 0 0;-ms-border-radius:2px 0 0 0;-o-border-radius:2px 0 0 0;border-radius:2px 0 0 0}.table-bordered caption+thead tr:first-child th:last-child,.table-bordered caption+tbody tr:first-child td:last-child,.table-bordered colgroup+thead tr:first-child th:last-child,.table-bordered colgroup+tbody tr:first-child td:last-child{-webkit-border-radius:0 2px 0 0;-moz-border-radius:0 2px 0 0;-ms-border-radius:0 2px 0 0;-o-border-radius:0 2px 0 0;border-radius:0 2px 0 0}.table-striped tbody>tr:nth-child(even)>td,.table-striped tbody>tr:nth-child(even)>th{background-color:#fafafa}.table-hover tbody tr:hover>td,.table-hover tbody tr:hover>th{background-color:#f0f0f0}.table-bare td{border-top:0}.table tbody tr td.success,.table tbody tr.success>td{background-color:#e6f2d0}.table tbody tr td.error,.table tbody tr.error>td{background-color:#f3c1c2}.table tbody tr td.warning,.table tbody tr.warning>td{background-color:#fff5db}.table tbody tr td.info,.table tbody tr.info>td{background-color:#c8def1}tr.fade td{-webkit-animation:bgFadeOut 1.5s ease 1;-moz-animation:bgFadeOut 1.5s ease 1;-ms-animation:bgFadeOut 1.5s ease 1;-o-animation:bgFadeOut 1.5s ease 1;animation:bgFadeOut 1.5s ease 1}.table-hover td{-webkit-transition:background-color 0.2s ease;-moz-transition:background-color 0.2s ease;-o-transition:background-color 0.2s ease;transition:background-color 0.2s ease}.table-hover tbody tr td.success:hover,.table-hover tbody tr.success:hover>td{background-color:#dbecbc}.table-hover tbody tr td.error:hover,.table-hover tbody tr.error:hover>td{background-color:#efacad}.table-hover tbody tr td.warning:hover,.table-hover tbody tr.warning:hover>td{background-color:#ffeec2}.table-hover tbody tr td.info:hover,.table-hover tbody 
tr.info:hover>td{background-color:#b4d2ec}td.editable:hover{cursor:text;outline:1px dotted #a4a4a4;outline-offset:-5px}td.editor{padding:0}td.editor,td.editor.editable{background-color:#fff;outline:1px solid #a4a4a4;outline-offset:-5px}td.editor input,td.editor select{background:transparent;border:0;display:block;font-size:1em;height:100%;margin:0;padding:0.86667rem;width:100%}td.editor input:focus,td.editor select:focus{outline:none;-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none}td.select-cell,td.select-cell.editable.editor,td.select-cell.editor:hover,td.select2-cell,td.select2-cell.editable.editor,td.select2-cell.editor:hover,td.edit-always,td.edit-always.editable.editor,td.edit-always.editor:hover,td.proxy-cell,td.proxy-cell.editable.editor,td.proxy-cell.editor:hover,td.boolean-cell,td.boolean-cell.editable.editor,td.boolean-cell.editor:hover{outline:none}td.boolean-cell,td.select-row-cell,th.select-all-header-cell{text-align:center;width:1.13333em}.text-cell{max-width:25em;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.select2-cell .select2-container{width:100%}.icon-cell{text-align:center;width:1.6em}.icon-cell img{height:auto;max-width:100%}.icon-cell:first-child{padding-right:0}.icon-cell:last-child{padding-left:0}.table-top:empty,.table-content:empty,.table-pagination:empty,.table-meta:empty,.table-info:empty,.table-bottom:empty{display:none}.table-meta .pagination a,.table-meta .link-nav-list a{font-size:1rem;line-height:1.5}@media screen and (max-width: 49.2em){.table-content{max-width:100%;overflow-x:auto}}@media screen and (min-width: 49.2em){.table-pagination{float:left}.table-info{float:right}}.modunit{background-color:#fff;margin-top:1.5rem;margin-bottom:1.5rem;padding:0;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px;-webkit-transition:all 0.35s ease;-moz-transition:all 0.35s ease;-o-transition:all 0.35s ease;transition:all 0.35s ease}.modunit.needs-upgrade 
.mod-setting-control:after{background-color:#2f7bbf;content:'\0024';color:#fff;height:1.5rem;position:absolute;right:0;text-align:center;top:0;width:1.5rem;-webkit-border-radius:0 2px 0 2px;-moz-border-radius:0 2px 0 2px;-ms-border-radius:0 2px 0 2px;-o-border-radius:0 2px 0 2px;border-radius:0 2px 0 2px}.modunit.extended{margin-left:auto;margin-right:auto;width:98%}.modunit.highlighted{-webkit-box-shadow:0 0 20px rgba(0,0,0,0.15);-moz-box-shadow:0 0 20px rgba(0,0,0,0.15);box-shadow:0 0 20px rgba(0,0,0,0.15)}.modunit>*+*{border-top:1px solid #f5f5f5}.modunit>*:empty{display:none !important}.mod-content{padding:1.5rem}.mod-content+.mod-content{padding-top:0}.mod-content hr{margin:1.5rem 0}.mod-header{padding:1.5rem;position:relative}.mod-header:only-child{border-bottom:0}.mod-title{font-weight:400;margin-bottom:1rem}.mod-title small{padding-left:0.4rem;white-space:nowrap}.ancillary-info{color:#dedede;line-height:1;position:absolute;right:1.5rem;top:1.5rem}.ancillary-info i{cursor:pointer;font-size:1.25rem;margin:0;vertical-align:middle;-webkit-transition:color 150ms ease;-moz-transition:color 150ms ease;-o-transition:color 150ms ease;transition:color 150ms ease}.ancillary-info i:hover{color:#7e7e7e}.ancillary-info i+i{margin-left:0.26667rem}.mod-row{background-color:#fff;clear:both;font-size:0;padding:0.86667rem;position:relative;white-space:nowrap;width:100%}.striped .mod-row:nth-child(even),.mod-row.stripe{background-color:#f7f7f7}.mod-row.ui-toolbar{margin:0;display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline}.mod-cell{display:block;font-size:1rem;overflow:visible;padding:0;position:relative;white-space:normal}.mod-cell .select2-container{width:100%}.mod-cell+.mod-cell,.mod-cell+.ui-group{padding-left:0.4rem}.mod-cell:first-child{padding-left:0}.mod-cell:only-child{width:100%}.input-row{-webkit-box-align:stretch;-moz-box-align:stretch;-ms-box-align:stretch;box-align:stretch}.input-row 
.mod-cell{width:100%}.input-row .mod-cell+.mod-cell,.input-row .mod-cell+.ui-group{padding-left:0;margin-top:0.5rem}.mod-cell,.cell-primary,.cell-actions{width:auto}.cell-icon{height:100%;text-align:center}.cell-primary input,.cell-primary select,.cell-primary textarea{width:100%}.cell-input input,.cell-input select,.cell-input textarea{margin-bottom:0}.simple-actions{text-align:right}.mod-table-adjustable .mod-cell,.mod-row-adjustable .mod-cell{width:100%}.mod-table-adjustable .mod-cell+.mod-cell,.mod-table-adjustable .mod-cell+.ui-group,.mod-row-adjustable .mod-cell+.mod-cell,.mod-row-adjustable .mod-cell+.ui-group{padding-left:0;margin-top:0.4rem}.mod-setting{display:table;width:100%}.mod-setting .mod-header,.mod-setting .mod-setting-control{display:table-cell;vertical-align:middle}.input-row,.mod-setting,.mod-setting-control,.mod-table-adjustable .mod-row{-webkit-box-orient:vertical;-moz-box-orient:vertical;-ms-box-orient:vertical;box-orient:vertical}.mod-setting-control{background-color:rgba(0,0,0,0.02);border-left:1px solid #f5f5f5;padding:2rem;position:relative;text-align:center;-moz-border-radius-topright:2px;-webkit-border-top-right-radius:2px;border-top-right-radius:2px}.mod-radio-group,.mod-checkbox-group{text-align:left;display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline}.mod-radio-group label,.mod-checkbox-group label{font-size:1rem}.mod-radio-group label+label,.mod-checkbox-group label+label{margin-top:1em}.mod-radio-group input[type="radio"],.mod-checkbox-group input[type="checkbox"]{margin-top:0.4em}.mod-control-group.mod-setting-control{padding:0;text-align:left}.mod-control-group .ui-block{margin-bottom:0;padding:1rem 1.4rem 1.26667rem;width:100%}.mod-control-group .ui-block+.ui-block{border-top:1px solid #e8e8e8;margin-left:0}.mod-control-group label{font-weight:300}.mod-toolbar{color:#7e7e7e;overflow:hidden;position:relative}.mod-notification,.modunit .link-nav-list 
a{padding:1.11667rem 1.5rem;display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline}.mod-notification{max-width:65%}.modunit .link-nav-list{float:right;height:100%;list-style:none;margin:0;padding:0;text-align:right}.modunit .link-nav-list li{display:inline}.modunit .link-nav-list li a{border-left:1px solid #f5f5f5}.mod-more-link,.modunit .link-nav-list .mod-more-link{padding-right:2.25em;position:relative}.mod-more-link:after,.modunit .link-nav-list .mod-more-link:after{content:'\f0da';font-family:FontAwesome;font-style:normal;font-weight:normal;margin-top:-0.6em;position:absolute;top:50%;right:1rem;-webkit-transition:all 0.2s ease;-moz-transition:all 0.2s ease;-o-transition:all 0.2s ease;transition:all 0.2s ease}.mod-more-link.active,.modunit .link-nav-list .mod-more-link.active{color:#f68b1f}.mod-more-link.active:after,.modunit .link-nav-list .mod-more-link.active:after{content:'\f0d7'}.cssanimations .mod-more-link.active:after,.cssanimations .modunit .link-nav-list .mod-more-link.active:after{content:'\f0da';-webkit-transform:rotate(90deg);-moz-transform:rotate(90deg);-ms-transform:rotate(90deg);-o-transform:rotate(90deg);transform:rotate(90deg)}.modunit-exception+.modunit-exception{border-top:1px solid #f5f5f5}.modunit.loading .mod-setting-control{pointer-events:none;-webkit-user-select:none;-moz-user-select:none;user-select:none}.modunit.loading .mod-setting-control:before{background:transparent url('../images/spinner.gif') no-repeat 1.5rem 1.5rem;content:'';cursor:wait;display:block;height:100%;left:0;position:absolute;top:0;width:100%;z-index:10;filter:progid:DXImageTransform.Microsoft.Alpha(Opacity=80);opacity:0.8;-webkit-user-select:none;-moz-user-select:none;user-select:none}.modunit .table{margin-bottom:0}.modunit .table-top .ui-toolbar{padding-left:0.86667rem;padding-right:0.86667rem}.modunit .table-meta{border-top:1px solid #f5f5f5}.modunit 
.table-pagination{overflow-x:auto;max-width:100%;padding:0 0.86667rem;white-space:nowrap}.modunit .pagination .btn{border:0;padding:1.11667rem 0.5em;vertical-align:baseline;-webkit-border-radius:0;-moz-border-radius:0;-ms-border-radius:0;-o-border-radius:0;border-radius:0}.modunit .pagination .btn.disabled,.modunit .pagination .btn.loading{background-color:transparent}.modunit .pagination.centered{text-align:center}.modunit .pagination li+li{margin-left:0}.flexbox .mod-row,.flexbox .mod-toolbar,.flexbox .mod-toolbar-menu{display:-webkit-box;display:-moz-box;display:-ms-box;display:box}.flexbox .mod-row{-webkit-box-pack:start;-moz-box-pack:start;-ms-box-pack:start;box-pack:start;-webkit-box-align:center;-moz-box-align:center;-ms-box-align:center;box-align:center}.flexbox .mod-cell,.flexbox .mod-row>.ui-group{display:block;float:none;-webkit-box-flex:0;-moz-box-flex:0;-ms-box-flex:0;box-flex:0}.flexbox .cell-primary{-webkit-box-flex:10;-moz-box-flex:10;-ms-box-flex:10;box-flex:10}.flexbox .mod-setting{-webkit-box-align:stretch;-moz-box-align:stretch;-ms-box-align:stretch;box-align:stretch;-webkit-flex-align:stretch;-moz-flex-align:stretch;-ms-flex-align:stretch;flex-align:stretch}.flexbox .mod-setting .mod-header{-webkit-box-flex:1;-moz-box-flex:1;-ms-box-flex:1;box-flex:1}.flexbox .mod-setting,.flexbox .mod-setting .mod-setting-control{display:-webkit-box;display:-moz-box;display:-ms-box;display:box;-webkit-box-pack:center;-moz-box-pack:center;-ms-box-pack:center;box-pack:center}.flexbox .mod-setting .mod-header{display:block}.flexbox .mod-setting-control{-webkit-box-align:start;-moz-box-align:start;-ms-box-align:start;box-align:start;-webkit-flex-align:start;-moz-flex-align:start;-ms-flex-align:start;flex-align:start;-webkit-box-flex:0;-moz-box-flex:0;-ms-box-flex:0;box-flex:0}.flexbox .mod-toolbar .mod-notification,.flexbox .mod-toolbar .mod-toolbar-menu li{display:block}.flexbox .mod-toolbar 
.mod-notification{-webkit-box-align:start;-moz-box-align:start;-ms-box-align:start;box-align:start;-webkit-flex-align:start;-moz-flex-align:start;-ms-flex-align:start;flex-align:start;-webkit-box-flex:4;-moz-box-flex:4;-ms-box-flex:4;box-flex:4}.flexbox .mod-toolbar .mod-toolbar-menu{float:none;-webkit-box-flex:1;-moz-box-flex:1;-ms-box-flex:1;box-flex:1;-webkit-box-pack:end;-moz-box-pack:end;-ms-box-pack:end;box-pack:end}@media screen and (min-width: 49.2em){.modunit{margin-top:3rem;margin-bottom:3rem}.modunit .table-pagination{overflow-x:visible}.mod-cell{display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline}.cell-icon{width:3%}.cell-primary{line-height:2;overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.cell-expanded{line-height:1.5;overflow:visible;white-space:normal}.cell-input{line-height:1.5;overflow:visible}.input-row{-webkit-box-align:center;-moz-box-align:center;-ms-box-align:center;box-align:center}.input-row .mod-cell{width:auto}.input-row .mod-cell+.mod-cell,.input-row .mod-cell+.ui-group{margin-top:0;padding-left:0.4rem}.mod-control{width:10rem}.singular-row .mod-cell{margin-left:0;width:100%}.mod-table-adjustable .mod-cell,.mod-row-adjustable .mod-cell{width:auto}.mod-table-adjustable .mod-cell+.mod-cell,.mod-table-adjustable .mod-cell+.ui-group,.mod-row-adjustable .mod-cell+.mod-cell,.mod-row-adjustable .mod-cell+.ui-group{padding-left:0.4rem;margin-top:0}.mod-table-adjustable .cell-primary,.mod-row-adjustable .cell-primary{width:69%}.mod-table-adjustable .cell-actions,.mod-row-adjustable .cell-actions{min-width:15.66667rem;width:28%}.mod-table-adjustable .input-row .cell-primary,.mod-row-adjustable.input-row .cell-primary{width:72%}.mod-setting-control{padding:1rem;width:40%}.modunit-exception+.modunit-exception{border-top:0}.flexbox .mod-row{-webkit-box-align:center;-moz-box-align:center;-ms-box-align:center;box-align:center}.flexbox .input-row,.flexbox .mod-setting,.flexbox 
.mod-setting-control,.flexbox .mod-table-adjustable .mod-row{-webkit-box-orient:horizontal;-moz-box-orient:horizontal;-ms-box-orient:horizontal;box-orient:horizontal}.flexbox .mod-setting-control{-webkit-box-align:center;-moz-box-align:center;-ms-box-align:center;box-align:center;-webkit-flex-align:center;-moz-flex-align:center;-ms-flex-align:center;flex-align:center}}@media screen and (min-width: 66em){.mod-setting-control{width:30%}}.ui-item{position:relative;max-width:100%;display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline}.ui-item+.ui-item,.ui-item+.btn{margin-left:0.4rem}.ui-item select,.ui-item .select2-container{width:100%}.ui-item input,.ui-item select{margin-bottom:0}.ui-block{display:block;margin-bottom:1em}.ui-group,.btn-group{display:-moz-inline-stack;display:inline-block;vertical-align:middle;*vertical-align:middle;zoom:1;*display:inline;font-size:0;position:relative;vertical-align:middle;white-space:nowrap}.ui-group+.ui-group,.ui-group+.btn-group,.ui-group+.ui-item,.btn-group+.ui-group,.btn-group+.btn-group,.btn-group+.ui-item{margin-left:0.4rem}.ui-group>.btn,.ui-group>.dropdown-menu,.ui-group>.popover,.ui-group>.select2-container,.ui-group>.ui-item,.btn-group>.btn,.btn-group>.dropdown-menu,.btn-group>.popover,.btn-group>.select2-container,.btn-group>.ui-item{font-size:0.93333rem;-webkit-user-select:none;-moz-user-select:none;user-select:none}.ui-item:empty,.ui-group:empty,.btn-group:empty{display:none}.ui-item>input,.ui-group>input,.btn-group>input{font-size:0.86667rem;margin-bottom:0}.ui-toolbar{display:block;font-size:0;margin-bottom:0.66667rem;margin-top:0.66667rem}.ui-toolbar .btn+.ui-item,.ui-toolbar .btn+.ui-group,.ui-toolbar .btn+.btn-group,.ui-toolbar .ui-group+.btn,.ui-toolbar .ui-group+.btn-group,.ui-toolbar .btn-group+.btn,.ui-toolbar .btn-group+.ui-item,.ui-toolbar .ui-item+.btn,.ui-toolbar .ui-item+.btn-group,.ui-toolbar 
.ui-item+.ui-group{margin-left:0.4rem}.ui-toolbar>.btn,.ui-toolbar>.dropdown-menu,.ui-toolbar>.popover,.ui-toolbar>.select2-container,.ui-toolbar>.ui-item{font-size:0.93333rem;margin-bottom:0}.ui-toolbar>select,.ui-toolbar>input{font-size:13px;margin-bottom:0}.ui-toolbar>i{height:2.26667rem}.btn-group>.btn{position:relative;-webkit-border-radius:0;-moz-border-radius:0;-ms-border-radius:0;-o-border-radius:0;border-radius:0}.btn-group>.btn:hover,.input-picker .picker-list td .btn-group>button.checked-value,.btn-group>.btn:focus,.btn-group>.btn:active,.btn-group>.btn.active{z-index:5}.btn-group>.btn:first-child{-moz-border-radius-topleft:2px;-webkit-border-top-left-radius:2px;border-top-left-radius:2px;-moz-border-radius-bottomleft:2px;-webkit-border-bottom-left-radius:2px;border-bottom-left-radius:2px}.btn-group>.btn:last-child{-moz-border-radius-topright:2px;-webkit-border-top-right-radius:2px;border-top-right-radius:2px;-moz-border-radius-bottomright:2px;-webkit-border-bottom-right-radius:2px;border-bottom-right-radius:2px}.btn-group>.btn+.btn,.btn-group>.btn+.btn-group{margin-left:-1px}.btn-group .btn-group+.btn{margin-left:-1px}.btn-group-vertical>.btn{display:block;float:none;margin:0.4rem auto;max-width:100%}.vert-arrows{width:24px;position:relative}.vert-arrows:before,.vert-arrows:after{border:4px solid transparent;border-bottom-color:#333;content:'';display:block;height:0;left:50%;margin-left:-5px;margin-top:-9px;position:absolute;top:50%;width:0}.vert-arrows:before{border-bottom-color:transparent;border-top-color:#333;margin-top:3px}.horz-arrows{width:24px;position:relative}.horz-arrows:before,.horz-arrows:after{border:4px solid transparent;border-left-color:#333;content:'';display:block;height:0;left:50%;margin-left:2px;margin-top:-3px;position:absolute;top:50%;width:0}.horz-arrows:before{border-left-color:transparent;border-right-color:#333;margin-left:-10px}/* - * Font Awesome 3.0.2 - * the iconic font designed for use with Twitter Bootstrap - * 
------------------------------------------------------- - * The full suite of pictographic icons, examples, and documentation - * can be found at: http://fortawesome.github.com/Font-Awesome/ - * - * License - * ------------------------------------------------------- - * - The Font Awesome font is licensed under the SIL Open Font License - http://scripts.sil.org/OFL - * - Font Awesome CSS, LESS, and SASS files are licensed under the MIT License - - * http://opensource.org/licenses/mit-license.html - * - The Font Awesome pictograms are licensed under the CC BY 3.0 License - http://creativecommons.org/licenses/by/3.0/ - * - Attribution is no longer required in Font Awesome 3.0, but much appreciated: - * "Font Awesome by Dave Gandy - http://fortawesome.github.com/Font-Awesome" - * - * Contact - * ------------------------------------------------------- - * Email: dave@davegandy.com - * Twitter: http://twitter.com/fortaweso_me - * Work: Lead Product Designer @ http://kyruus.com - */@font-face{ - font-family:'FontAwesome'; - src:url('../fonts/fontawesome-cloudflare.eot?v=3.0.1'); - src:url('../fonts/fontawesome-cloudflare.eot?#iefix&v=3.0.1') format("embedded-opentype"),url('../fonts/fontawesome-cloudflare.woff?v=3.0.1') format("woff"),url('../fonts/fontawesome-cloudflare.ttf?v=3.0.1') format("truetype"); - font-weight:normal; - font-style:normal -} -.icon{font-family:FontAwesome;font-weight:normal;font-style:normal;text-decoration:inherit;-webkit-font-smoothing:antialiased;display:inline;width:auto;height:auto;line-height:normal;vertical-align:baseline;background-image:none;background-position:0% 0%;background-repeat:repeat;margin-top:0}.icon:before{text-decoration:inherit;display:inline-block;speak:none}.icon-white,.dropdown-menu>li>a:hover>[class^="icon-"],.dropdown-menu>li>a:hover>[class*=" icon-"],.dropdown-menu>.active>a>[class^="icon-"],.dropdown-menu>.active>a>[class*=" icon-"],.dropdown-submenu:hover>a>[class^="icon-"],.dropdown-submenu:hover>a>[class*=" 
icon-"]{background-image:none}a .icon{display:inline-block}.icon-large:before{vertical-align:-10%;font-size:1.3333333333333334em}.btn .icon,.nav .icon{display:inline}.btn .icon:first-child,.nav .icon:first-child{padding-right:0.25em}.btn .icon:last-child,.nav .icon:last-child{padding-left:0.25em}.btn .icon.icon-large,.nav .icon.icon-large{line-height:.9em}.btn .icon.icon-spin,.nav .icon.icon-spin{display:inline-block}.tabs .icon,.tabs .icon.icon-large{line-height:.9em}li .icon,.nav li .icon{display:inline-block;width:1.25em;text-align:center}li .icon.icon-large,.nav li .icon.icon-large{width:1.5625em}ul.icons{list-style-type:none;text-indent:-.75em}ul.icons li .icon{width:.75em}.icon-muted{color:#fafafa}.icon-border{border:solid 1px #fafafa;padding:.2em .25em .15em;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}.icon-2x{font-size:2em}.icon-2x.icon-border{border-width:2px;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}.icon-3x{font-size:3em}.icon-3x.icon-border{border-width:3px;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}.icon-4x{font-size:4em}.icon-4x.icon-border{border-width:4px;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}.icon.pull-left{margin-right:.3em}.icon.pull-right{margin-left:.3em}.btn .icon.pull-left.icon-2x,.btn .icon.pull-right.icon-2x{margin-top:.18em}.btn .icon.icon-spin.icon-large{line-height:.8em}.btn.btn-small .icon.pull-left.icon-2x,.btn.btn-small .icon.pull-right.icon-2x{margin-top:.25em}.btn.btn-large .icon{margin-top:0}.btn.btn-large .icon.pull-left.icon-2x,.btn.btn-large .icon.pull-right.icon-2x{margin-top:.05em}.btn.btn-large .icon.pull-left.icon-2x{margin-right:.2em}.btn.btn-large .icon.pull-right.icon-2x{margin-left:.2em}.icon-spin{display:inline-block;-moz-animation:spin 2s infinite 
linear;-o-animation:spin 2s infinite linear;-webkit-animation:spin 2s infinite linear;animation:spin 2s infinite linear}@-moz-keyframes spin{0%{-moz-transform:rotate(0deg)}100%{-moz-transform:rotate(359deg)}}@-webkit-keyframes spin{0%{-webkit-transform:rotate(0deg)}100%{-webkit-transform:rotate(359deg)}}@-o-keyframes spin{0%{-o-transform:rotate(0deg)}100%{-o-transform:rotate(359deg)}}@-ms-keyframes spin{0%{-ms-transform:rotate(0deg)}100%{-ms-transform:rotate(359deg)}}@keyframes spin{0%{transform:rotate(0deg)}100%{transform:rotate(359deg)}}@-moz-document url-prefix(){.icon-spin{height:.9em}.btn .icon-spin{height:auto}.icon-spin.icon-large{height:1.25em}.btn .icon-spin.icon-large{height:.75em}}.icon-glass:before{content:"\f000"}.icon-music:before{content:"\f001"}.icon-search:before{content:"\f002"}.icon-envelope:before{content:"\f003"}.icon-heart:before{content:"\f004"}.icon-star:before{content:"\f005"}.icon-star-empty:before{content:"\f006"}.icon-user:before{content:"\f007"}.icon-film:before{content:"\f008"}.icon-th-large:before{content:"\f009"}.icon-th:before{content:"\f00a"}.icon-th-list:before{content:"\f00b"}.icon-ok:before{content:"\f00c"}.icon-remove:before{content:"\f00d"}.icon-zoom-in:before{content:"\f00e"}.icon-zoom-out:before{content:"\f010"}.icon-off:before{content:"\f011"}.icon-signal:before{content:"\f012"}.icon-cog:before{content:"\f013"}.icon-trash:before{content:"\f014"}.icon-home:before{content:"\f015"}.icon-file:before{content:"\f016"}.icon-time:before{content:"\f017"}.icon-road:before{content:"\f018"}.icon-download-alt:before{content:"\f019"}.icon-download:before{content:"\f01a"}.icon-upload:before{content:"\f01b"}.icon-inbox:before{content:"\f01c"}.icon-play-circle:before{content:"\f01d"}.icon-repeat:before{content:"\f01e"}.icon-refresh:before{content:"\f021"}.icon-list-alt:before{content:"\f022"}.icon-lock:before{content:"\f023"}.icon-flag:before{content:"\f024"}.icon-headphones:before{content:"\f025"}.icon-volume-off:before{content:"\f026"}.ico
n-volume-down:before{content:"\f027"}.icon-volume-up:before{content:"\f028"}.icon-qrcode:before{content:"\f029"}.icon-barcode:before{content:"\f02a"}.icon-tag:before{content:"\f02b"}.icon-tags:before{content:"\f02c"}.icon-book:before{content:"\f02d"}.icon-bookmark:before{content:"\f02e"}.icon-print:before{content:"\f02f"}.icon-camera:before{content:"\f030"}.icon-font:before{content:"\f031"}.icon-bold:before{content:"\f032"}.icon-italic:before{content:"\f033"}.icon-text-height:before{content:"\f034"}.icon-text-width:before{content:"\f035"}.icon-align-left:before{content:"\f036"}.icon-align-center:before{content:"\f037"}.icon-align-right:before{content:"\f038"}.icon-align-justify:before{content:"\f039"}.icon-list:before{content:"\f03a"}.icon-indent-left:before{content:"\f03b"}.icon-indent-right:before{content:"\f03c"}.icon-facetime-video:before{content:"\f03d"}.icon-picture:before{content:"\f03e"}.icon-pencil:before{content:"\f040"}.icon-map-marker:before{content:"\f041"}.icon-adjust:before{content:"\f042"}.icon-tint:before{content:"\f043"}.icon-edit:before{content:"\f044"}.icon-share:before{content:"\f045"}.icon-check:before{content:"\f046"}.icon-move:before{content:"\f047"}.icon-step-backward:before{content:"\f048"}.icon-fast-backward:before{content:"\f049"}.icon-backward:before{content:"\f04a"}.icon-play:before{content:"\f04b"}.icon-pause:before{content:"\f04c"}.icon-stop:before{content:"\f04d"}.icon-forward:before{content:"\f04e"}.icon-fast-forward:before{content:"\f050"}.icon-step-forward:before{content:"\f051"}.icon-eject:before{content:"\f052"}.icon-chevron-left:before{content:"\f053"}.icon-chevron-right:before{content:"\f054"}.icon-plus-sign:before{content:"\f055"}.icon-minus-sign:before{content:"\f056"}.icon-remove-sign:before{content:"\f057"}.icon-ok-sign:before{content:"\f058"}.icon-question-sign:before{content:"\f059"}.icon-info-sign:before{content:"\f05a"}.icon-screenshot:before{content:"\f05b"}.icon-remove-circle:before{content:"\f05c"}.icon-ok-circle:be
fore{content:"\f05d"}.icon-ban-circle:before{content:"\f05e"}.icon-arrow-left:before{content:"\f060"}.icon-arrow-right:before{content:"\f061"}.icon-arrow-up:before{content:"\f062"}.icon-arrow-down:before{content:"\f063"}.icon-share-alt:before{content:"\f064"}.icon-resize-full:before{content:"\f065"}.icon-resize-small:before{content:"\f066"}.icon-plus:before{content:"\f067"}.icon-minus:before{content:"\f068"}.icon-asterisk:before{content:"\f069"}.icon-exclamation-sign:before{content:"\f06a"}.icon-gift:before{content:"\f06b"}.icon-leaf:before{content:"\f06c"}.icon-fire:before{content:"\f06d"}.icon-eye-open:before{content:"\f06e"}.icon-eye-close:before{content:"\f070"}.icon-warning-sign:before{content:"\f071"}.icon-plane:before{content:"\f072"}.icon-calendar:before{content:"\f073"}.icon-random:before{content:"\f074"}.icon-comment:before{content:"\f075"}.icon-magnet:before{content:"\f076"}.icon-chevron-up:before{content:"\f077"}.icon-chevron-down:before{content:"\f078"}.icon-retweet:before{content:"\f079"}.icon-shopping-cart:before{content:"\f07a"}.icon-folder-close:before{content:"\f07b"}.icon-folder-open:before{content:"\f07c"}.icon-resize-vertical:before{content:"\f07d"}.icon-resize-horizontal:before{content:"\f07e"}.icon-bar-chart:before{content:"\f080"}.icon-twitter-sign:before{content:"\f081"}.icon-facebook-sign:before{content:"\f082"}.icon-camera-retro:before{content:"\f083"}.icon-key:before{content:"\f084"}.icon-cogs:before{content:"\f085"}.icon-comments:before{content:"\f086"}.icon-thumbs-up:before{content:"\f087"}.icon-thumbs-down:before{content:"\f088"}.icon-star-half:before{content:"\f089"}.icon-heart-empty:before{content:"\f08a"}.icon-signout:before{content:"\f08b"}.icon-linkedin-sign:before{content:"\f08c"}.icon-pushpin:before{content:"\f08d"}.icon-external-link:before{content:"\f08e"}.icon-signin:before{content:"\f090"}.icon-trophy:before{content:"\f091"}.icon-github-sign:before{content:"\f092"}.icon-upload-alt:before{content:"\f093"}.icon-lemon:before{co
ntent:"\f094"}.icon-phone:before{content:"\f095"}.icon-check-empty:before{content:"\f096"}.icon-bookmark-empty:before{content:"\f097"}.icon-phone-sign:before{content:"\f098"}.icon-twitter:before{content:"\f099"}.icon-facebook:before{content:"\f09a"}.icon-github:before{content:"\f09b"}.icon-unlock:before{content:"\f09c"}.icon-credit-card:before{content:"\f09d"}.icon-rss:before{content:"\f09e"}.icon-hdd:before{content:"\f0a0"}.icon-bullhorn:before{content:"\f0a1"}.icon-bell:before{content:"\f0a2"}.icon-certificate:before{content:"\f0a3"}.icon-hand-right:before{content:"\f0a4"}.icon-hand-left:before{content:"\f0a5"}.icon-hand-up:before{content:"\f0a6"}.icon-hand-down:before{content:"\f0a7"}.icon-circle-arrow-left:before{content:"\f0a8"}.icon-circle-arrow-right:before{content:"\f0a9"}.icon-circle-arrow-up:before{content:"\f0aa"}.icon-circle-arrow-down:before{content:"\f0ab"}.icon-globe:before{content:"\f0ac"}.icon-wrench:before{content:"\f0ad"}.icon-tasks:before{content:"\f0ae"}.icon-filter:before{content:"\f0b0"}.icon-briefcase:before{content:"\f0b1"}.icon-fullscreen:before{content:"\f0b2"}.icon-group:before{content:"\f0c0"}.icon-link:before{content:"\f0c1"}.icon-cloud:before{content:"\f0c2"}.icon-beaker:before{content:"\f0c3"}.icon-cut:before{content:"\f0c4"}.icon-copy:before{content:"\f0c5"}.icon-paper-clip:before{content:"\f0c6"}.icon-save:before{content:"\f0c7"}.icon-sign-blank:before{content:"\f0c8"}.icon-reorder:before{content:"\f0c9"}.icon-list-ul:before{content:"\f0ca"}.icon-list-ol:before{content:"\f0cb"}.icon-strikethrough:before{content:"\f0cc"}.icon-underline:before{content:"\f0cd"}.icon-table:before{content:"\f0ce"}.icon-magic:before{content:"\f0d0"}.icon-truck:before{content:"\f0d1"}.icon-pinterest:before{content:"\f0d2"}.icon-pinterest-sign:before{content:"\f0d3"}.icon-google-plus-sign:before{content:"\f0d4"}.icon-google-plus:before{content:"\f0d5"}.icon-money:before{content:"\f0d6"}.icon-caret-down:before{content:"\f0d7"}.icon-caret-up:before{content:"\
f0d8"}.icon-caret-left:before{content:"\f0d9"}.icon-caret-right:before{content:"\f0da"}.icon-columns:before{content:"\f0db"}.icon-sort:before{content:"\f0dc"}.icon-sort-down:before{content:"\f0dd"}.icon-sort-up:before{content:"\f0de"}.icon-envelope-alt:before{content:"\f0e0"}.icon-linkedin:before{content:"\f0e1"}.icon-undo:before{content:"\f0e2"}.icon-legal:before{content:"\f0e3"}.icon-dashboard:before{content:"\f0e4"}.icon-comment-alt:before{content:"\f0e5"}.icon-comments-alt:before{content:"\f0e6"}.icon-bolt:before{content:"\f0e7"}.icon-sitemap:before{content:"\f0e8"}.icon-umbrella:before{content:"\f0e9"}.icon-paste:before{content:"\f0ea"}.icon-lightbulb:before{content:"\f0eb"}.icon-exchange:before{content:"\f0ec"}.icon-cloud-download:before{content:"\f0ed"}.icon-cloud-upload:before{content:"\f0ee"}.icon-user-md:before{content:"\f0f0"}.icon-stethoscope:before{content:"\f0f1"}.icon-suitcase:before{content:"\f0f2"}.icon-bell-alt:before{content:"\f0f3"}.icon-coffee:before{content:"\f0f4"}.icon-food:before{content:"\f0f5"}.icon-file-alt:before{content:"\f0f6"}.icon-building:before{content:"\f0f7"}.icon-hospital:before{content:"\f0f8"}.icon-ambulance:before{content:"\f0f9"}.icon-medkit:before{content:"\f0fa"}.icon-fighter-jet:before{content:"\f0fb"}.icon-beer:before{content:"\f0fc"}.icon-h-sign:before{content:"\f0fd"}.icon-plus-sign-alt:before{content:"\f0fe"}.icon-double-angle-left:before{content:"\f100"}.icon-double-angle-right:before{content:"\f101"}.icon-double-angle-up:before{content:"\f102"}.icon-double-angle-down:before{content:"\f103"}.icon-angle-left:before{content:"\f104"}.icon-angle-right:before{content:"\f105"}.icon-angle-up:before{content:"\f106"}.icon-angle-down:before{content:"\f107"}.icon-desktop:before{content:"\f108"}.icon-laptop:before{content:"\f109"}.icon-tablet:before{content:"\f10a"}.icon-mobile-phone:before{content:"\f10b"}.icon-circle-blank:before{content:"\f10c"}.icon-quote-left:before{content:"\f10d"}.icon-quote-right:before{content:"\f10e"}.
icon-spinner:before{content:"\f110"}.icon-circle:before{content:"\f111"}.icon-reply:before{content:"\f112"}.icon-github-alt:before{content:"\f113"}.icon-folder-close-alt:before{content:"\f114"}.icon-folder-open-alt:before{content:"\f115"}body{background-color:#fff;-webkit-text-size-adjust:none}.site-wrapper{background-color:#f5f5f5}hr,.hr{border:0;border-top:1px solid #dedede;display:block;height:0;margin:2rem 0;width:100%}hr.double,.hr.double{border-top:3px double #dedede}.drag-handle{cursor:move}.drag-handle.vert-arrows{cursor:ns-resize}.drag-handle.horz-arrows{cursor:ew-resize}.login-form{max-width:30.4rem}.flexbox .flex{display:-webkit-box;display:-moz-box;display:-ms-box;display:box;-webkit-box-pack:center;-moz-box-pack:center;-ms-box-pack:center;box-pack:center;-webkit-box-align:start;-moz-box-align:start;-ms-box-align:start;box-align:start;-webkit-flex-align:start;-moz-flex-align:start;-ms-flex-align:start;flex-align:start;-webkit-box-orient:vertical;-moz-box-orient:vertical;-ms-box-orient:vertical;box-orient:vertical}.flexbox .flex>*{display:block;-webkit-box-flex:0;-moz-box-flex:0;-ms-box-flex:0;box-flex:0}.flexbox .flex>*+*{margin-top:0.4rem}.flexbox .flex.flex-horz{-webkit-box-orient:horizontal;-moz-box-orient:horizontal;-ms-box-orient:horizontal;box-orient:horizontal}.flexbox .flex.flex-horz>*+*{margin-top:0}.flexbox .flex-primary{-webkit-box-flex:10;-moz-box-flex:10;-ms-box-flex:10;box-flex:10}.sticky-item{z-index:1300;-webkit-box-shadow:0 2px 4px rgba(0,0,0,0.25);-moz-box-shadow:0 2px 4px rgba(0,0,0,0.25);box-shadow:0 2px 4px rgba(0,0,0,0.25)}@media screen and (min-width: 49.2em){.flexbox .flex{-webkit-box-align:center;-moz-box-align:center;-ms-box-align:center;box-align:center;-webkit-flex-align:center;-moz-flex-align:center;-ms-flex-align:center;flex-align:center;-webkit-box-orient:horizontal;-moz-box-orient:horizontal;-ms-box-orient:horizontal;box-orient:horizontal}.flexbox .flex>*+*{margin-top:auto}.flexbox 
.flex.flex-vert{-webkit-box-orient:vertical;-moz-box-orient:vertical;-ms-box-orient:vertical;box-orient:vertical}.flexbox .flex.flex-vert>*+*{margin-top:0.4rem}.flexbox .flex.flex-start{-webkit-box-align:start;-moz-box-align:start;-ms-box-align:start;box-align:start;-webkit-flex-align:start;-moz-flex-align:start;-ms-flex-align:start;flex-align:start}}body{background-color:#f5f5f5}hr,.hr{background:#c9c9c9;border:0;display:block;height:1px;margin:2.25em 0;width:100%}.post+.post{border-top:1px solid #dedede;margin-top:2em;padding-top:1em}.post .title a{color:#4d4d4d}.post .title a:hover{color:#f68b1f}.post header{margin-bottom:1.5rem}.post footer{margin-top:0.66667rem}.meta{font-size:0.86667em;font-style:italic}.social{margin-top:1em}.social div,.social span,.social iframe{display:inline-block;}.featured-image{margin-bottom:2.66667rem}.featured-image img{display:block;margin:0 auto;width:100%}.widget+.widget{margin-top:1.5em}.widget-title{margin-bottom:0.26667rem}.sidebar blockquote{color:#7e7e7e;font-size:0.86667rem}.sidebar .widget-title{color:#777}.widget-title+.menu-sidebar{margin-top:0.5rem}.post-content img{display:block;margin:0 auto}.post-content iframe{margin:1.5em auto;display:block;width:100%}.learn-more{background:#eee;padding:1em;-webkit-border-radius:2px;-moz-border-radius:2px;-ms-border-radius:2px;-o-border-radius:2px;border-radius:2px}h1,.h1,h2,.h2{line-height:1.4}.sidebar .widget p,.sidebar .widget ul{font-size:80%;color:#7e7e7e}ol,ul{margin-left:2em}@media screen and (min-width: 49.2em){.social{height:25px}}@media print{#header,footer,aside,.social,#footer,.sidebar{display:none}h1,h2,h3,h4,h5,h6,p,li{color:black}body{font-size:12px}p,a,li{font-size:12px}h1{font-size:24px}h2{font-size:16px}h3{font-size:14px}.post-content a:link:after,.post-content a:visited:after{content:" (" attr(href) ") ";font-size:85%}} - .fb-like { - line-height: 1; - vertical-align: top; - margin: 0 10px; -} -.older-posts { - float:right; -} -.credits { - clear: both; - 
padding: 2rem 0 0 0; - font-size: 12px; - color: #bbb; -} -.credits a { - color: #bbb; - vertical-align: bottom; -} - -code{ - background-clip: padding-box; - border: 1px solid #ccc; - color: #333; - background-color: #eaeaea; - display: inline-block; - line-height: 20px; - margin: 0 2px -1px; - padding: 0px 3px; - vertical-align: baseline; - font-family: monospace; -} - -pre{ - background-color: #eaeaea; -} - -pre code{ - border: none !important; -} \ No newline at end of file diff --git a/h2mux/shared_buffer.go b/h2mux/shared_buffer.go deleted file mode 100644 index 31868c8c..00000000 --- a/h2mux/shared_buffer.go +++ /dev/null @@ -1,67 +0,0 @@ -package h2mux - -import ( - "bytes" - "io" - "sync" -) - -type SharedBuffer struct { - cond *sync.Cond - buffer bytes.Buffer - eof bool -} - -func NewSharedBuffer() *SharedBuffer { - return &SharedBuffer{ - cond: sync.NewCond(&sync.Mutex{}), - } -} - -func (s *SharedBuffer) Read(p []byte) (n int, err error) { - totalRead := 0 - s.cond.L.Lock() - for totalRead == 0 { - n, err = s.buffer.Read(p[totalRead:]) - totalRead += n - if err == io.EOF { - if s.eof { - break - } - err = nil - if n > 0 { - break - } - s.cond.Wait() - } - } - s.cond.L.Unlock() - return totalRead, err -} - -func (s *SharedBuffer) Write(p []byte) (n int, err error) { - s.cond.L.Lock() - defer s.cond.L.Unlock() - if s.eof { - return 0, io.EOF - } - n, err = s.buffer.Write(p) - s.cond.Signal() - return -} - -func (s *SharedBuffer) Close() error { - s.cond.L.Lock() - defer s.cond.L.Unlock() - if !s.eof { - s.eof = true - s.cond.Signal() - } - return nil -} - -func (s *SharedBuffer) Closed() bool { - s.cond.L.Lock() - defer s.cond.L.Unlock() - return s.eof -} diff --git a/h2mux/shared_buffer_test.go b/h2mux/shared_buffer_test.go deleted file mode 100644 index fe438fae..00000000 --- a/h2mux/shared_buffer_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package h2mux - -import ( - "bytes" - "io" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) 
- -func AssertIOReturnIsGood(t *testing.T, expected int) func(int, error) { - return func(actual int, err error) { - if expected != actual { - t.Fatalf("Expected %d bytes, got %d", expected, actual) - } - if err != nil { - t.Fatalf("Unexpected error %s", err) - } - } -} - -func TestSharedBuffer(t *testing.T) { - b := NewSharedBuffer() - testData := []byte("Hello world") - AssertIOReturnIsGood(t, len(testData))(b.Write(testData)) - bytesRead := make([]byte, len(testData)) - AssertIOReturnIsGood(t, len(testData))(b.Read(bytesRead)) -} - -func TestSharedBufferBlockingRead(t *testing.T) { - b := NewSharedBuffer() - testData1 := []byte("Hello") - testData2 := []byte(" world") - result := make(chan []byte) - go func() { - bytesRead := make([]byte, len(testData1)+len(testData2)) - nRead, err := b.Read(bytesRead) - AssertIOReturnIsGood(t, len(testData1))(nRead, err) - result <- bytesRead[:nRead] - nRead, err = b.Read(bytesRead) - AssertIOReturnIsGood(t, len(testData2))(nRead, err) - result <- bytesRead[:nRead] - }() - time.Sleep(time.Millisecond * 250) - select { - case <-result: - t.Fatalf("read returned early") - default: - } - AssertIOReturnIsGood(t, len(testData1))(b.Write([]byte(testData1))) - select { - case r := <-result: - assert.Equal(t, testData1, r) - case <-time.After(time.Second): - t.Fatalf("read timed out") - } - AssertIOReturnIsGood(t, len(testData2))(b.Write([]byte(testData2))) - select { - case r := <-result: - assert.Equal(t, testData2, r) - case <-time.After(time.Second): - t.Fatalf("read timed out") - } -} - -// This is quite slow under the race detector -func TestSharedBufferConcurrentReadWrite(t *testing.T) { - b := NewSharedBuffer() - var expectedResult, actualResult bytes.Buffer - var wg sync.WaitGroup - wg.Add(2) - go func() { - block := make([]byte, 256) - for i := range block { - block[i] = byte(i) - } - for blockSize := 1; blockSize <= 256; blockSize++ { - for i := 0; i < 256; i++ { - expectedResult.Write(block[:blockSize]) - n, err := 
b.Write(block[:blockSize]) - if n != blockSize || err != nil { - t.Errorf("write error: %d %s", n, err) - return - } - } - } - wg.Done() - }() - go func() { - block := make([]byte, 256) - // Change block sizes in opposition to the write thread, to test blocking for new data. - for blockSize := 256; blockSize > 0; blockSize-- { - for i := 0; i < 256; i++ { - n, err := io.ReadFull(b, block[:blockSize]) - if n != blockSize || err != nil { - t.Errorf("read error: %d %s", n, err) - return - } - actualResult.Write(block[:blockSize]) - } - } - wg.Done() - }() - wg.Wait() - if bytes.Compare(expectedResult.Bytes(), actualResult.Bytes()) != 0 { - t.Fatal("Result diverged") - } -} - -func TestSharedBufferClose(t *testing.T) { - b := NewSharedBuffer() - testData := []byte("Hello world") - AssertIOReturnIsGood(t, len(testData))(b.Write(testData)) - err := b.Close() - if err != nil { - t.Fatalf("unexpected error from Close: %s", err) - } - bytesRead := make([]byte, len(testData)) - AssertIOReturnIsGood(t, len(testData))(b.Read(bytesRead)) - n, err := b.Read(bytesRead) - if n != 0 { - t.Fatalf("extra bytes received: %d", n) - } - if err != io.EOF { - t.Fatalf("expected EOF, got %s", err) - } -} diff --git a/h2mux/signal.go b/h2mux/signal.go deleted file mode 100644 index d716aed2..00000000 --- a/h2mux/signal.go +++ /dev/null @@ -1,34 +0,0 @@ -package h2mux - -// Signal describes an event that can be waited on for at least one signal. -// Signalling the event while it is in the signalled state is a noop. -// When the waiter wakes up, the signal is set to unsignalled. -// It is a way for any number of writers to inform a reader (without blocking) -// that an event has happened. -type Signal struct { - c chan struct{} -} - -// NewSignal creates a new Signal. -func NewSignal() Signal { - return Signal{c: make(chan struct{}, 1)} -} - -// Signal signals the event. 
-func (s Signal) Signal() { - // This channel is buffered, so the nonblocking send will always succeed if the buffer is empty. - select { - case s.c <- struct{}{}: - default: - } -} - -// Wait for the event to be signalled. -func (s Signal) Wait() { - <-s.c -} - -// WaitChannel returns a channel that is readable after Signal is called. -func (s Signal) WaitChannel() <-chan struct{} { - return s.c -} diff --git a/h2mux/streamerrormap.go b/h2mux/streamerrormap.go deleted file mode 100644 index 926b5ff2..00000000 --- a/h2mux/streamerrormap.go +++ /dev/null @@ -1,47 +0,0 @@ -package h2mux - -import ( - "sync" - - "golang.org/x/net/http2" -) - -// StreamErrorMap is used to track stream errors. This is a separate structure to ActiveStreamMap because -// errors can be raised against non-existent or closed streams. -type StreamErrorMap struct { - sync.RWMutex - // errors tracks per-stream errors - errors map[uint32]http2.ErrCode - // hasError is signaled whenever an error is raised. - hasError Signal -} - -// NewStreamErrorMap creates a new StreamErrorMap. -func NewStreamErrorMap() *StreamErrorMap { - return &StreamErrorMap{ - errors: make(map[uint32]http2.ErrCode), - hasError: NewSignal(), - } -} - -// RaiseError raises a stream error. -func (s *StreamErrorMap) RaiseError(streamID uint32, err http2.ErrCode) { - s.Lock() - s.errors[streamID] = err - s.Unlock() - s.hasError.Signal() -} - -// GetSignalChan returns a channel that is signalled when an error is raised. -func (s *StreamErrorMap) GetSignalChan() <-chan struct{} { - return s.hasError.WaitChannel() -} - -// GetErrors retrieves all errors currently raised. This resets the currently-tracked errors. 
-func (s *StreamErrorMap) GetErrors() map[uint32]http2.ErrCode { - s.Lock() - errors := s.errors - s.errors = make(map[uint32]http2.ErrCode) - s.Unlock() - return errors -} diff --git a/ingress/icmp_darwin.go b/ingress/icmp_darwin.go index 4e315f15..31972ac5 100644 --- a/ingress/icmp_darwin.go +++ b/ingress/icmp_darwin.go @@ -28,10 +28,8 @@ type icmpProxy struct { srcFunnelTracker *packet.FunnelTracker echoIDTracker *echoIDTracker conn *icmp.PacketConn - // Response is handled in one-by-one, so encoder can be shared between funnels - encoder *packet.Encoder - logger *zerolog.Logger - idleTimeout time.Duration + logger *zerolog.Logger + idleTimeout time.Duration } // echoIDTracker tracks which ID has been assigned. It first loops through assignment from lastAssignment to then end, @@ -114,8 +112,8 @@ func (snf echoFunnelID) String() string { return strconv.FormatUint(uint64(snf), 10) } -func newICMPProxy(listenIP netip.Addr, zone string, logger *zerolog.Logger, idleTimeout time.Duration) (*icmpProxy, error) { - conn, err := newICMPConn(listenIP, zone) +func newICMPProxy(listenIP netip.Addr, logger *zerolog.Logger, idleTimeout time.Duration) (*icmpProxy, error) { + conn, err := newICMPConn(listenIP) if err != nil { return nil, err } @@ -123,16 +121,15 @@ func newICMPProxy(listenIP netip.Addr, zone string, logger *zerolog.Logger, idle return &icmpProxy{ srcFunnelTracker: packet.NewFunnelTracker(), echoIDTracker: newEchoIDTracker(), - encoder: packet.NewEncoder(), conn: conn, logger: logger, idleTimeout: idleTimeout, }, nil } -func (ip *icmpProxy) Request(ctx context.Context, pk *packet.ICMP, responder *packetResponder) error { - _, span := responder.requestSpan(ctx, pk) - defer responder.exportSpan() +func (ip *icmpProxy) Request(ctx context.Context, pk *packet.ICMP, responder ICMPResponder) error { + _, span := responder.RequestSpan(ctx, pk) + defer responder.ExportSpan() originalEcho, err := getICMPEcho(pk.Message) if err != nil { @@ -154,7 +151,7 @@ func (ip 
*icmpProxy) Request(ctx context.Context, pk *packet.ICMP, responder *pa } span.SetAttributes(attribute.Int("assignedEchoID", int(assignedEchoID))) - shouldReplaceFunnelFunc := createShouldReplaceFunnelFunc(ip.logger, responder.datagramMuxer, pk, originalEcho.ID) + shouldReplaceFunnelFunc := createShouldReplaceFunnelFunc(ip.logger, responder, pk, originalEcho.ID) newFunnelFunc := func() (packet.Funnel, error) { originalEcho, err := getICMPEcho(pk.Message) if err != nil { @@ -164,7 +161,7 @@ func (ip *icmpProxy) Request(ctx context.Context, pk *packet.ICMP, responder *pa ip.echoIDTracker.release(echoIDTrackerKey, assignedEchoID) return nil } - icmpFlow := newICMPEchoFlow(pk.Src, closeCallback, ip.conn, responder, int(assignedEchoID), originalEcho.ID, ip.encoder) + icmpFlow := newICMPEchoFlow(pk.Src, closeCallback, ip.conn, responder, int(assignedEchoID), originalEcho.ID) return icmpFlow, nil } funnelID := echoFunnelID(assignedEchoID) @@ -265,8 +262,8 @@ func (ip *icmpProxy) sendReply(ctx context.Context, reply *echoReply) error { return err } - _, span := icmpFlow.responder.replySpan(ctx, ip.logger) - defer icmpFlow.responder.exportSpan() + _, span := icmpFlow.responder.ReplySpan(ctx, ip.logger) + defer icmpFlow.responder.ExportSpan() if err := icmpFlow.returnToSrc(reply); err != nil { tracing.EndWithErrorStatus(span, err) diff --git a/ingress/icmp_generic.go b/ingress/icmp_generic.go index 88e2581d..4964244a 100644 --- a/ingress/icmp_generic.go +++ b/ingress/icmp_generic.go @@ -18,7 +18,7 @@ var errICMPProxyNotImplemented = fmt.Errorf("ICMP proxy is not implemented on %s type icmpProxy struct{} -func (ip icmpProxy) Request(ctx context.Context, pk *packet.ICMP, responder *packetResponder) error { +func (ip icmpProxy) Request(ctx context.Context, pk *packet.ICMP, responder ICMPResponder) error { return errICMPProxyNotImplemented } @@ -26,6 +26,6 @@ func (ip *icmpProxy) Serve(ctx context.Context) error { return errICMPProxyNotImplemented } -func newICMPProxy(listenIP 
netip.Addr, zone string, logger *zerolog.Logger, idleTimeout time.Duration) (*icmpProxy, error) { +func newICMPProxy(listenIP netip.Addr, logger *zerolog.Logger, idleTimeout time.Duration) (*icmpProxy, error) { return nil, errICMPProxyNotImplemented } diff --git a/ingress/icmp_linux.go b/ingress/icmp_linux.go index 829a000a..0b263a8f 100644 --- a/ingress/icmp_linux.go +++ b/ingress/icmp_linux.go @@ -37,25 +37,23 @@ var ( type icmpProxy struct { srcFunnelTracker *packet.FunnelTracker listenIP netip.Addr - ipv6Zone string logger *zerolog.Logger idleTimeout time.Duration } -func newICMPProxy(listenIP netip.Addr, zone string, logger *zerolog.Logger, idleTimeout time.Duration) (*icmpProxy, error) { - if err := testPermission(listenIP, zone, logger); err != nil { +func newICMPProxy(listenIP netip.Addr, logger *zerolog.Logger, idleTimeout time.Duration) (*icmpProxy, error) { + if err := testPermission(listenIP, logger); err != nil { return nil, err } return &icmpProxy{ srcFunnelTracker: packet.NewFunnelTracker(), listenIP: listenIP, - ipv6Zone: zone, logger: logger, idleTimeout: idleTimeout, }, nil } -func testPermission(listenIP netip.Addr, zone string, logger *zerolog.Logger) error { +func testPermission(listenIP netip.Addr, logger *zerolog.Logger) error { // Opens a non-privileged ICMP socket. 
On Linux the group ID of the process needs to be in ping_group_range // Only check ping_group_range once for IPv4 if listenIP.Is4() { @@ -64,7 +62,7 @@ func testPermission(listenIP netip.Addr, zone string, logger *zerolog.Logger) er return err } } - conn, err := newICMPConn(listenIP, zone) + conn, err := newICMPConn(listenIP) if err != nil { return err } @@ -98,9 +96,9 @@ func checkInPingGroup() error { return fmt.Errorf("did not find group range in %s", pingGroupPath) } -func (ip *icmpProxy) Request(ctx context.Context, pk *packet.ICMP, responder *packetResponder) error { - ctx, span := responder.requestSpan(ctx, pk) - defer responder.exportSpan() +func (ip *icmpProxy) Request(ctx context.Context, pk *packet.ICMP, responder ICMPResponder) error { + ctx, span := responder.RequestSpan(ctx, pk) + defer responder.ExportSpan() originalEcho, err := getICMPEcho(pk.Message) if err != nil { @@ -109,9 +107,9 @@ func (ip *icmpProxy) Request(ctx context.Context, pk *packet.ICMP, responder *pa } observeICMPRequest(ip.logger, span, pk.Src.String(), pk.Dst.String(), originalEcho.ID, originalEcho.Seq) - shouldReplaceFunnelFunc := createShouldReplaceFunnelFunc(ip.logger, responder.datagramMuxer, pk, originalEcho.ID) + shouldReplaceFunnelFunc := createShouldReplaceFunnelFunc(ip.logger, responder, pk, originalEcho.ID) newFunnelFunc := func() (packet.Funnel, error) { - conn, err := newICMPConn(ip.listenIP, ip.ipv6Zone) + conn, err := newICMPConn(ip.listenIP) if err != nil { tracing.EndWithErrorStatus(span, err) return nil, errors.Wrap(err, "failed to open ICMP socket") @@ -127,7 +125,7 @@ func (ip *icmpProxy) Request(ctx context.Context, pk *packet.ICMP, responder *pa span.SetAttributes(attribute.Int("port", localUDPAddr.Port)) echoID := localUDPAddr.Port - icmpFlow := newICMPEchoFlow(pk.Src, closeCallback, conn, responder, echoID, originalEcho.ID, packet.NewEncoder()) + icmpFlow := newICMPEchoFlow(pk.Src, closeCallback, conn, responder, echoID, originalEcho.ID) return icmpFlow, nil 
} funnelID := flow3Tuple{ @@ -181,8 +179,8 @@ func (ip *icmpProxy) listenResponse(ctx context.Context, flow *icmpEchoFlow) { // Listens for ICMP response and handles error logging func (ip *icmpProxy) handleResponse(ctx context.Context, flow *icmpEchoFlow, buf []byte) (done bool) { - _, span := flow.responder.replySpan(ctx, ip.logger) - defer flow.responder.exportSpan() + _, span := flow.responder.ReplySpan(ctx, ip.logger) + defer flow.responder.ExportSpan() span.SetAttributes( attribute.Int("originalEchoID", flow.originalEchoID), diff --git a/ingress/icmp_posix.go b/ingress/icmp_posix.go index b03be49e..a5353917 100644 --- a/ingress/icmp_posix.go +++ b/ingress/icmp_posix.go @@ -18,15 +18,11 @@ import ( ) // Opens a non-privileged ICMP socket on Linux and Darwin -func newICMPConn(listenIP netip.Addr, zone string) (*icmp.PacketConn, error) { +func newICMPConn(listenIP netip.Addr) (*icmp.PacketConn, error) { if listenIP.Is4() { return icmp.ListenPacket("udp4", listenIP.String()) } - listenAddr := listenIP.String() - if zone != "" { - listenAddr = listenAddr + "%" + zone - } - return icmp.ListenPacket("udp6", listenAddr) + return icmp.ListenPacket("udp6", listenIP.String()) } func netipAddr(addr net.Addr) (netip.Addr, bool) { @@ -34,7 +30,8 @@ func netipAddr(addr net.Addr) (netip.Addr, bool) { if !ok { return netip.Addr{}, false } - return netip.AddrFromSlice(udpAddr.IP) + + return udpAddr.AddrPort().Addr(), true } type flow3Tuple struct { @@ -50,14 +47,12 @@ type icmpEchoFlow struct { closed *atomic.Bool src netip.Addr originConn *icmp.PacketConn - responder *packetResponder + responder ICMPResponder assignedEchoID int originalEchoID int - // it's up to the user to ensure respEncoder is not used concurrently - respEncoder *packet.Encoder } -func newICMPEchoFlow(src netip.Addr, closeCallback func() error, originConn *icmp.PacketConn, responder *packetResponder, assignedEchoID, originalEchoID int, respEncoder *packet.Encoder) *icmpEchoFlow { +func newICMPEchoFlow(src 
netip.Addr, closeCallback func() error, originConn *icmp.PacketConn, responder ICMPResponder, assignedEchoID, originalEchoID int) *icmpEchoFlow { return &icmpEchoFlow{ ActivityTracker: packet.NewActivityTracker(), closeCallback: closeCallback, @@ -67,7 +62,6 @@ func newICMPEchoFlow(src netip.Addr, closeCallback func() error, originConn *icm responder: responder, assignedEchoID: assignedEchoID, originalEchoID: originalEchoID, - respEncoder: respEncoder, } } @@ -139,11 +133,7 @@ func (ief *icmpEchoFlow) returnToSrc(reply *echoReply) error { }, Message: reply.msg, } - serializedPacket, err := ief.respEncoder.Encode(&pk) - if err != nil { - return err - } - return ief.responder.returnPacket(serializedPacket) + return ief.responder.ReturnPacket(&pk) } type echoReply struct { @@ -184,7 +174,7 @@ func toICMPEchoFlow(funnel packet.Funnel) (*icmpEchoFlow, error) { return icmpFlow, nil } -func createShouldReplaceFunnelFunc(logger *zerolog.Logger, muxer muxer, pk *packet.ICMP, originalEchoID int) func(packet.Funnel) bool { +func createShouldReplaceFunnelFunc(logger *zerolog.Logger, responder ICMPResponder, pk *packet.ICMP, originalEchoID int) func(packet.Funnel) bool { return func(existing packet.Funnel) bool { existingFlow, err := toICMPEchoFlow(existing) if err != nil { @@ -199,7 +189,7 @@ func createShouldReplaceFunnelFunc(logger *zerolog.Logger, muxer muxer, pk *pack // If the existing flow has a different muxer, there's a new quic connection where return packets should be // routed. Otherwise, return packets will be send to the first observed incoming connection, rather than the // most recently observed connection. - if existingFlow.responder.datagramMuxer != muxer { + if existingFlow.responder.ConnectionIndex() != responder.ConnectionIndex() { logger.Debug(). Str("src", pk.Src.String()). Str("dst", pk.Dst.String()). 
diff --git a/ingress/icmp_posix_test.go b/ingress/icmp_posix_test.go index ea8dc7e9..6231e1b9 100644 --- a/ingress/icmp_posix_test.go +++ b/ingress/icmp_posix_test.go @@ -27,7 +27,7 @@ func TestFunnelIdleTimeout(t *testing.T) { startSeq = 8129 ) logger := zerolog.New(os.Stderr) - proxy, err := newICMPProxy(localhostIP, "", &logger, idleTimeout) + proxy, err := newICMPProxy(localhostIP, &logger, idleTimeout) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -56,24 +56,19 @@ func TestFunnelIdleTimeout(t *testing.T) { }, } muxer := newMockMuxer(0) - responder := packetResponder{ - datagramMuxer: muxer, - } - require.NoError(t, proxy.Request(ctx, &pk, &responder)) + responder := newPacketResponder(muxer, 0, packet.NewEncoder()) + require.NoError(t, proxy.Request(ctx, &pk, responder)) validateEchoFlow(t, <-muxer.cfdToEdge, &pk) // Send second request, should reuse the funnel - require.NoError(t, proxy.Request(ctx, &pk, &packetResponder{ - datagramMuxer: muxer, - })) + require.NoError(t, proxy.Request(ctx, &pk, responder)) validateEchoFlow(t, <-muxer.cfdToEdge, &pk) + // New muxer on a different connection should use a new flow time.Sleep(idleTimeout * 2) newMuxer := newMockMuxer(0) - newResponder := packetResponder{ - datagramMuxer: newMuxer, - } - require.NoError(t, proxy.Request(ctx, &pk, &newResponder)) + newResponder := newPacketResponder(newMuxer, 1, packet.NewEncoder()) + require.NoError(t, proxy.Request(ctx, &pk, newResponder)) validateEchoFlow(t, <-newMuxer.cfdToEdge, &pk) time.Sleep(idleTimeout * 2) @@ -90,7 +85,7 @@ func TestReuseFunnel(t *testing.T) { startSeq = 8129 ) logger := zerolog.New(os.Stderr) - proxy, err := newICMPProxy(localhostIP, "", &logger, idleTimeout) + proxy, err := newICMPProxy(localhostIP, &logger, idleTimeout) require.NoError(t, err) ctx, cancel := context.WithCancel(context.Background()) @@ -124,18 +119,14 @@ func TestReuseFunnel(t *testing.T) { originalEchoID: echoID, } muxer := newMockMuxer(0) - 
responder := packetResponder{ - datagramMuxer: muxer, - } - require.NoError(t, proxy.Request(ctx, &pk, &responder)) + responder := newPacketResponder(muxer, 0, packet.NewEncoder()) + require.NoError(t, proxy.Request(ctx, &pk, responder)) validateEchoFlow(t, <-muxer.cfdToEdge, &pk) funnel1, found := getFunnel(t, proxy, tuple) require.True(t, found) // Send second request, should reuse the funnel - require.NoError(t, proxy.Request(ctx, &pk, &packetResponder{ - datagramMuxer: muxer, - })) + require.NoError(t, proxy.Request(ctx, &pk, responder)) validateEchoFlow(t, <-muxer.cfdToEdge, &pk) funnel2, found := getFunnel(t, proxy, tuple) require.True(t, found) diff --git a/ingress/icmp_windows.go b/ingress/icmp_windows.go index 19604ee4..23c3eb50 100644 --- a/ingress/icmp_windows.go +++ b/ingress/icmp_windows.go @@ -13,7 +13,6 @@ import ( "fmt" "net/netip" "runtime/debug" - "sync" "syscall" "time" "unsafe" @@ -222,11 +221,9 @@ type icmpProxy struct { // This is a ICMPv6 if srcSocketAddr is not nil srcSocketAddr *sockAddrIn6 logger *zerolog.Logger - // A pool of reusable *packet.Encoder - encoderPool sync.Pool } -func newICMPProxy(listenIP netip.Addr, zone string, logger *zerolog.Logger, idleTimeout time.Duration) (*icmpProxy, error) { +func newICMPProxy(listenIP netip.Addr, logger *zerolog.Logger, idleTimeout time.Duration) (*icmpProxy, error) { var ( srcSocketAddr *sockAddrIn6 handle uintptr @@ -250,11 +247,6 @@ func newICMPProxy(listenIP netip.Addr, zone string, logger *zerolog.Logger, idle handle: handle, srcSocketAddr: srcSocketAddr, logger: logger, - encoderPool: sync.Pool{ - New: func() any { - return packet.NewEncoder() - }, - }, }, nil } @@ -267,15 +259,15 @@ func (ip *icmpProxy) Serve(ctx context.Context) error { // Request sends an ICMP echo request and wait for a reply or timeout. // The async version of Win32 APIs take a callback whose memory is not garbage collected, so we use the synchronous version. 
// It's possible that a slow request will block other requests, so we set the timeout to only 1s. -func (ip *icmpProxy) Request(ctx context.Context, pk *packet.ICMP, responder *packetResponder) error { +func (ip *icmpProxy) Request(ctx context.Context, pk *packet.ICMP, responder ICMPResponder) error { defer func() { if r := recover(); r != nil { ip.logger.Error().Interface("error", r).Msgf("Recover panic from sending icmp request/response, error %s", debug.Stack()) } }() - _, requestSpan := responder.requestSpan(ctx, pk) - defer responder.exportSpan() + _, requestSpan := responder.RequestSpan(ctx, pk) + defer responder.ExportSpan() echo, err := getICMPEcho(pk.Message) if err != nil { @@ -290,9 +282,9 @@ func (ip *icmpProxy) Request(ctx context.Context, pk *packet.ICMP, responder *pa return err } tracing.End(requestSpan) - responder.exportSpan() + responder.ExportSpan() - _, replySpan := responder.replySpan(ctx, ip.logger) + _, replySpan := responder.ReplySpan(ctx, ip.logger) err = ip.handleEchoReply(pk, echo, resp, responder) if err != nil { ip.logger.Err(err).Msg("Failed to send ICMP reply") @@ -308,7 +300,7 @@ func (ip *icmpProxy) Request(ctx context.Context, pk *packet.ICMP, responder *pa return nil } -func (ip *icmpProxy) handleEchoReply(request *packet.ICMP, echoReq *icmp.Echo, resp echoResp, responder *packetResponder) error { +func (ip *icmpProxy) handleEchoReply(request *packet.ICMP, echoReq *icmp.Echo, resp echoResp, responder ICMPResponder) error { var replyType icmp.Type if request.Dst.Is4() { replyType = ipv4.ICMPTypeEchoReply @@ -333,21 +325,7 @@ func (ip *icmpProxy) handleEchoReply(request *packet.ICMP, echoReq *icmp.Echo, r }, }, } - - cachedEncoder := ip.encoderPool.Get() - // The encoded packet is a slice to of the encoder, so we shouldn't return the encoder back to the pool until - // the encoded packet is sent. 
- defer ip.encoderPool.Put(cachedEncoder) - encoder, ok := cachedEncoder.(*packet.Encoder) - if !ok { - return fmt.Errorf("encoderPool returned %T, expect *packet.Encoder", cachedEncoder) - } - - serializedPacket, err := encoder.Encode(&pk) - if err != nil { - return err - } - return responder.returnPacket(serializedPacket) + return responder.ReturnPacket(&pk) } func (ip *icmpProxy) icmpEchoRoundtrip(dst netip.Addr, echo *icmp.Echo) (echoResp, error) { diff --git a/ingress/icmp_windows_test.go b/ingress/icmp_windows_test.go index 8d7ad6b3..fa102442 100644 --- a/ingress/icmp_windows_test.go +++ b/ingress/icmp_windows_test.go @@ -132,7 +132,7 @@ func TestSendEchoErrors(t *testing.T) { } func testSendEchoErrors(t *testing.T, listenIP netip.Addr) { - proxy, err := newICMPProxy(listenIP, "", &noopLogger, time.Second) + proxy, err := newICMPProxy(listenIP, &noopLogger, time.Second) require.NoError(t, err) echo := icmp.Echo{ diff --git a/ingress/origin_icmp_proxy.go b/ingress/origin_icmp_proxy.go index 3eb0a6b3..981b8667 100644 --- a/ingress/origin_icmp_proxy.go +++ b/ingress/origin_icmp_proxy.go @@ -14,6 +14,7 @@ import ( "golang.org/x/net/ipv6" "github.com/cloudflare/cloudflared/packet" + "github.com/cloudflare/cloudflared/tracing" ) const ( @@ -26,17 +27,46 @@ var ( errPacketNil = fmt.Errorf("packet is nil") ) +// ICMPRouterServer is a parent interface over-top of ICMPRouter that allows for the operation of the proxy origin listeners. +type ICMPRouterServer interface { + ICMPRouter + // Serve runs the ICMPRouter proxy origin listeners for any of the IPv4 or IPv6 interfaces configured. + Serve(ctx context.Context) error +} + +// ICMPRouter manages out-going ICMP requests towards the origin. +type ICMPRouter interface { + // Request will send an ICMP packet towards the origin with an ICMPResponder to attach to the ICMP flow for the + // response to utilize. 
+ Request(ctx context.Context, pk *packet.ICMP, responder ICMPResponder) error + // ConvertToTTLExceeded will take an ICMP packet and create a ICMP TTL Exceeded response origininating from the + // ICMPRouter's IP interface. + ConvertToTTLExceeded(pk *packet.ICMP, rawPacket packet.RawPacket) *packet.ICMP +} + +// ICMPResponder manages how to handle incoming ICMP messages coming from the origin to the edge. +type ICMPResponder interface { + ConnectionIndex() uint8 + ReturnPacket(pk *packet.ICMP) error + AddTraceContext(tracedCtx *tracing.TracedContext, serializedIdentity []byte) + RequestSpan(ctx context.Context, pk *packet.ICMP) (context.Context, trace.Span) + ReplySpan(ctx context.Context, logger *zerolog.Logger) (context.Context, trace.Span) + ExportSpan() +} + type icmpRouter struct { ipv4Proxy *icmpProxy + ipv4Src netip.Addr ipv6Proxy *icmpProxy + ipv6Src netip.Addr } // NewICMPRouter doesn't return an error if either ipv4 proxy or ipv6 proxy can be created. The machine might only // support one of them. 
// funnelIdleTimeout controls how long to wait to close a funnel without send/return -func NewICMPRouter(ipv4Addr, ipv6Addr netip.Addr, ipv6Zone string, logger *zerolog.Logger, funnelIdleTimeout time.Duration) (*icmpRouter, error) { - ipv4Proxy, ipv4Err := newICMPProxy(ipv4Addr, "", logger, funnelIdleTimeout) - ipv6Proxy, ipv6Err := newICMPProxy(ipv6Addr, ipv6Zone, logger, funnelIdleTimeout) +func NewICMPRouter(ipv4Addr, ipv6Addr netip.Addr, logger *zerolog.Logger, funnelIdleTimeout time.Duration) (ICMPRouterServer, error) { + ipv4Proxy, ipv4Err := newICMPProxy(ipv4Addr, logger, funnelIdleTimeout) + ipv6Proxy, ipv6Err := newICMPProxy(ipv6Addr, logger, funnelIdleTimeout) if ipv4Err != nil && ipv6Err != nil { err := fmt.Errorf("cannot create ICMPv4 proxy: %v nor ICMPv6 proxy: %v", ipv4Err, ipv6Err) logger.Debug().Err(err).Msg("ICMP proxy feature is disabled") @@ -52,7 +82,9 @@ func NewICMPRouter(ipv4Addr, ipv6Addr netip.Addr, ipv6Zone string, logger *zerol } return &icmpRouter{ ipv4Proxy: ipv4Proxy, + ipv4Src: ipv4Addr, ipv6Proxy: ipv6Proxy, + ipv6Src: ipv6Addr, }, nil } @@ -76,7 +108,7 @@ func (ir *icmpRouter) Serve(ctx context.Context) error { return fmt.Errorf("ICMPv4 proxy and ICMPv6 proxy are both nil") } -func (ir *icmpRouter) Request(ctx context.Context, pk *packet.ICMP, responder *packetResponder) error { +func (ir *icmpRouter) Request(ctx context.Context, pk *packet.ICMP, responder ICMPResponder) error { if pk == nil { return errPacketNil } @@ -92,6 +124,16 @@ func (ir *icmpRouter) Request(ctx context.Context, pk *packet.ICMP, responder *p return fmt.Errorf("ICMPv6 proxy was not instantiated") } +func (ir *icmpRouter) ConvertToTTLExceeded(pk *packet.ICMP, rawPacket packet.RawPacket) *packet.ICMP { + var srcIP netip.Addr + if pk.Dst.Is4() { + srcIP = ir.ipv4Src + } else { + srcIP = ir.ipv6Src + } + return packet.NewICMPTTLExceedPacket(pk.IP, rawPacket, srcIP) +} + func getICMPEcho(msg *icmp.Message) (*icmp.Echo, error) { echo, ok := msg.Body.(*icmp.Echo) if 
!ok { diff --git a/ingress/origin_icmp_proxy_test.go b/ingress/origin_icmp_proxy_test.go index 6c7b2f8c..b85013be 100644 --- a/ingress/origin_icmp_proxy_test.go +++ b/ingress/origin_icmp_proxy_test.go @@ -50,7 +50,7 @@ func testICMPRouterEcho(t *testing.T, sendIPv4 bool) { endSeq = 20 ) - router, err := NewICMPRouter(localhostIP, localhostIPv6, "", &noopLogger, testFunnelIdleTimeout) + router, err := NewICMPRouter(localhostIP, localhostIPv6, &noopLogger, testFunnelIdleTimeout) require.NoError(t, err) proxyDone := make(chan struct{}) @@ -61,9 +61,7 @@ func testICMPRouterEcho(t *testing.T, sendIPv4 bool) { }() muxer := newMockMuxer(1) - responder := packetResponder{ - datagramMuxer: muxer, - } + responder := newPacketResponder(muxer, 0, packet.NewEncoder()) protocol := layers.IPProtocolICMPv6 if sendIPv4 { @@ -98,7 +96,7 @@ func testICMPRouterEcho(t *testing.T, sendIPv4 bool) { }, }, } - require.NoError(t, router.Request(ctx, &pk, &responder)) + require.NoError(t, router.Request(ctx, &pk, responder)) validateEchoFlow(t, <-muxer.cfdToEdge, &pk) } } @@ -114,7 +112,7 @@ func TestTraceICMPRouterEcho(t *testing.T) { tracingCtx := "ec31ad8a01fde11fdcabe2efdce36873:52726f6cabc144f5:0:1" - router, err := NewICMPRouter(localhostIP, localhostIPv6, "", &noopLogger, testFunnelIdleTimeout) + router, err := NewICMPRouter(localhostIP, localhostIPv6, &noopLogger, testFunnelIdleTimeout) require.NoError(t, err) proxyDone := make(chan struct{}) @@ -131,11 +129,8 @@ func TestTraceICMPRouterEcho(t *testing.T) { serializedIdentity, err := tracingIdentity.MarshalBinary() require.NoError(t, err) - responder := packetResponder{ - datagramMuxer: muxer, - tracedCtx: tracing.NewTracedContext(ctx, tracingIdentity.String(), &noopLogger), - serializedIdentity: serializedIdentity, - } + responder := newPacketResponder(muxer, 0, packet.NewEncoder()) + responder.AddTraceContext(tracing.NewTracedContext(ctx, tracingIdentity.String(), &noopLogger), serializedIdentity) echo := &icmp.Echo{ ID: 12910, @@ 
-156,7 +151,7 @@ func TestTraceICMPRouterEcho(t *testing.T) { }, } - require.NoError(t, router.Request(ctx, &pk, &responder)) + require.NoError(t, router.Request(ctx, &pk, responder)) firstPK := <-muxer.cfdToEdge var requestSpan *quicpogs.TracingSpanPacket // The order of receiving reply or request span is not deterministic @@ -194,10 +189,8 @@ func TestTraceICMPRouterEcho(t *testing.T) { echo.Seq++ pk.Body = echo // Only first request for a flow is traced. The edge will not send tracing context for the second request - newResponder := packetResponder{ - datagramMuxer: muxer, - } - require.NoError(t, router.Request(ctx, &pk, &newResponder)) + newResponder := newPacketResponder(muxer, 0, packet.NewEncoder()) + require.NoError(t, router.Request(ctx, &pk, newResponder)) validateEchoFlow(t, <-muxer.cfdToEdge, &pk) select { @@ -221,7 +214,7 @@ func TestConcurrentRequestsToSameDst(t *testing.T) { endSeq = 5 ) - router, err := NewICMPRouter(localhostIP, localhostIPv6, "", &noopLogger, testFunnelIdleTimeout) + router, err := NewICMPRouter(localhostIP, localhostIPv6, &noopLogger, testFunnelIdleTimeout) require.NoError(t, err) proxyDone := make(chan struct{}) @@ -240,9 +233,7 @@ func TestConcurrentRequestsToSameDst(t *testing.T) { defer wg.Done() muxer := newMockMuxer(1) - responder := packetResponder{ - datagramMuxer: muxer, - } + responder := newPacketResponder(muxer, 0, packet.NewEncoder()) for seq := 0; seq < endSeq; seq++ { pk := &packet.ICMP{ IP: &packet.IP{ @@ -261,16 +252,14 @@ func TestConcurrentRequestsToSameDst(t *testing.T) { }, }, } - require.NoError(t, router.Request(ctx, pk, &responder)) + require.NoError(t, router.Request(ctx, pk, responder)) validateEchoFlow(t, <-muxer.cfdToEdge, pk) } }() go func() { defer wg.Done() muxer := newMockMuxer(1) - responder := packetResponder{ - datagramMuxer: muxer, - } + responder := newPacketResponder(muxer, 0, packet.NewEncoder()) for seq := 0; seq < endSeq; seq++ { pk := &packet.ICMP{ IP: &packet.IP{ @@ -289,7 +278,7 @@ 
func TestConcurrentRequestsToSameDst(t *testing.T) { }, }, } - require.NoError(t, router.Request(ctx, pk, &responder)) + require.NoError(t, router.Request(ctx, pk, responder)) validateEchoFlow(t, <-muxer.cfdToEdge, pk) } }() @@ -358,13 +347,11 @@ func TestICMPRouterRejectNotEcho(t *testing.T) { } func testICMPRouterRejectNotEcho(t *testing.T, srcDstIP netip.Addr, msgs []icmp.Message) { - router, err := NewICMPRouter(localhostIP, localhostIPv6, "", &noopLogger, testFunnelIdleTimeout) + router, err := NewICMPRouter(localhostIP, localhostIPv6, &noopLogger, testFunnelIdleTimeout) require.NoError(t, err) muxer := newMockMuxer(1) - responder := packetResponder{ - datagramMuxer: muxer, - } + responder := newPacketResponder(muxer, 0, packet.NewEncoder()) protocol := layers.IPProtocolICMPv4 if srcDstIP.Is6() { protocol = layers.IPProtocolICMPv6 @@ -379,7 +366,7 @@ func testICMPRouterRejectNotEcho(t *testing.T, srcDstIP netip.Addr, msgs []icmp. }, Message: &m, } - require.Error(t, router.Request(context.Background(), &pk, &responder)) + require.Error(t, router.Request(context.Background(), &pk, responder)) } } diff --git a/ingress/origin_udp_proxy.go b/ingress/origin_udp_proxy.go index 836489be..012c05c0 100644 --- a/ingress/origin_udp_proxy.go +++ b/ingress/origin_udp_proxy.go @@ -4,6 +4,7 @@ import ( "fmt" "io" "net" + "net/netip" ) type UDPProxy interface { @@ -30,3 +31,16 @@ func DialUDP(dstIP net.IP, dstPort uint16) (UDPProxy, error) { return &udpProxy{udpConn}, nil } + +func DialUDPAddrPort(dest netip.AddrPort) (*net.UDPConn, error) { + addr := net.UDPAddrFromAddrPort(dest) + + // We use nil as local addr to force runtime to find the best suitable local address IP given the destination + // address as context. 
+ udpConn, err := net.DialUDP("udp", nil, addr) + if err != nil { + return nil, fmt.Errorf("unable to dial udp to origin %s: %w", dest, err) + } + + return udpConn, nil +} diff --git a/ingress/packet_router.go b/ingress/packet_router.go index 1e15163a..c1224843 100644 --- a/ingress/packet_router.go +++ b/ingress/packet_router.go @@ -3,7 +3,6 @@ package ingress import ( "context" "fmt" - "net/netip" "github.com/rs/zerolog" "go.opentelemetry.io/otel/attribute" @@ -23,29 +22,23 @@ type muxer interface { // PacketRouter routes packets between Upstream and ICMPRouter. Currently it rejects all other type of ICMP packets type PacketRouter struct { - globalConfig *GlobalRouterConfig - muxer muxer - logger *zerolog.Logger - icmpDecoder *packet.ICMPDecoder - encoder *packet.Encoder -} - -// GlobalRouterConfig is the configuration shared by all instance of Router. -type GlobalRouterConfig struct { - ICMPRouter *icmpRouter - IPv4Src netip.Addr - IPv6Src netip.Addr - Zone string + icmpRouter ICMPRouter + muxer muxer + connIndex uint8 + logger *zerolog.Logger + encoder *packet.Encoder + decoder *packet.ICMPDecoder } // NewPacketRouter creates a PacketRouter that handles ICMP packets. Packets are read from muxer but dropped if globalConfig is nil. 
-func NewPacketRouter(globalConfig *GlobalRouterConfig, muxer muxer, logger *zerolog.Logger) *PacketRouter { +func NewPacketRouter(icmpRouter ICMPRouter, muxer muxer, connIndex uint8, logger *zerolog.Logger) *PacketRouter { return &PacketRouter{ - globalConfig: globalConfig, - muxer: muxer, - logger: logger, - icmpDecoder: packet.NewICMPDecoder(), - encoder: packet.NewEncoder(), + icmpRouter: icmpRouter, + muxer: muxer, + connIndex: connIndex, + logger: logger, + encoder: packet.NewEncoder(), + decoder: packet.NewICMPDecoder(), } } @@ -59,14 +52,13 @@ func (r *PacketRouter) Serve(ctx context.Context) error { } } -func (r *PacketRouter) nextPacket(ctx context.Context) (packet.RawPacket, *packetResponder, error) { +func (r *PacketRouter) nextPacket(ctx context.Context) (packet.RawPacket, ICMPResponder, error) { pk, err := r.muxer.ReceivePacket(ctx) if err != nil { return packet.RawPacket{}, nil, err } - responder := &packetResponder{ - datagramMuxer: r.muxer, - } + responder := newPacketResponder(r.muxer, r.connIndex, packet.NewEncoder()) + switch pk.Type() { case quicpogs.DatagramTypeIP: return packet.RawPacket{Data: pk.Payload()}, responder, nil @@ -75,8 +67,8 @@ func (r *PacketRouter) nextPacket(ctx context.Context) (packet.RawPacket, *packe if err := identity.UnmarshalBinary(pk.Metadata()); err != nil { r.logger.Err(err).Bytes("tracingIdentity", pk.Metadata()).Msg("Failed to unmarshal tracing identity") } else { - responder.tracedCtx = tracing.NewTracedContext(ctx, identity.String(), r.logger) - responder.serializedIdentity = pk.Metadata() + tracedCtx := tracing.NewTracedContext(ctx, identity.String(), r.logger) + responder.AddTraceContext(tracedCtx, pk.Metadata()) } return packet.RawPacket{Data: pk.Payload()}, responder, nil default: @@ -84,27 +76,27 @@ func (r *PacketRouter) nextPacket(ctx context.Context) (packet.RawPacket, *packe } } -func (r *PacketRouter) handlePacket(ctx context.Context, rawPacket packet.RawPacket, responder *packetResponder) { +func (r 
*PacketRouter) handlePacket(ctx context.Context, rawPacket packet.RawPacket, responder ICMPResponder) { // ICMP Proxy feature is disabled, drop packets - if r.globalConfig == nil { + if r.icmpRouter == nil { return } - icmpPacket, err := r.icmpDecoder.Decode(rawPacket) + icmpPacket, err := r.decoder.Decode(rawPacket) if err != nil { r.logger.Err(err).Msg("Failed to decode ICMP packet from quic datagram") return } if icmpPacket.TTL <= 1 { - if err := r.sendTTLExceedMsg(ctx, icmpPacket, rawPacket, r.encoder); err != nil { + if err := r.sendTTLExceedMsg(icmpPacket, rawPacket); err != nil { r.logger.Err(err).Msg("Failed to return ICMP TTL exceed error") } return } icmpPacket.TTL-- - if err := r.globalConfig.ICMPRouter.Request(ctx, icmpPacket, responder); err != nil { + if err := r.icmpRouter.Request(ctx, icmpPacket, responder); err != nil { r.logger.Err(err). Str("src", icmpPacket.Src.String()). Str("dst", icmpPacket.Dst.String()). @@ -113,16 +105,9 @@ func (r *PacketRouter) handlePacket(ctx context.Context, rawPacket packet.RawPac } } -func (r *PacketRouter) sendTTLExceedMsg(ctx context.Context, pk *packet.ICMP, rawPacket packet.RawPacket, encoder *packet.Encoder) error { - var srcIP netip.Addr - if pk.Dst.Is4() { - srcIP = r.globalConfig.IPv4Src - } else { - srcIP = r.globalConfig.IPv6Src - } - ttlExceedPacket := packet.NewICMPTTLExceedPacket(pk.IP, rawPacket, srcIP) - - encodedTTLExceed, err := encoder.Encode(ttlExceedPacket) +func (r *PacketRouter) sendTTLExceedMsg(pk *packet.ICMP, rawPacket packet.RawPacket) error { + icmpTTLPacket := r.icmpRouter.ConvertToTTLExceeded(pk, rawPacket) + encodedTTLExceed, err := r.encoder.Encode(icmpTTLPacket) if err != nil { return err } @@ -132,22 +117,45 @@ func (r *PacketRouter) sendTTLExceedMsg(ctx context.Context, pk *packet.ICMP, ra // packetResponder should not be used concurrently. 
This assumption is upheld because reply packets are ready one-by-one type packetResponder struct { datagramMuxer muxer + connIndex uint8 + encoder *packet.Encoder tracedCtx *tracing.TracedContext serializedIdentity []byte // hadReply tracks if there has been any reply for this flow hadReply bool } +func newPacketResponder(datagramMuxer muxer, connIndex uint8, encoder *packet.Encoder) ICMPResponder { + return &packetResponder{ + datagramMuxer: datagramMuxer, + connIndex: connIndex, + encoder: encoder, + } +} + func (pr *packetResponder) tracingEnabled() bool { return pr.tracedCtx != nil } -func (pr *packetResponder) returnPacket(rawPacket packet.RawPacket) error { +func (pr *packetResponder) ConnectionIndex() uint8 { + return pr.connIndex +} + +func (pr *packetResponder) ReturnPacket(pk *packet.ICMP) error { + rawPacket, err := pr.encoder.Encode(pk) + if err != nil { + return err + } pr.hadReply = true return pr.datagramMuxer.SendPacket(quicpogs.RawPacket(rawPacket)) } -func (pr *packetResponder) requestSpan(ctx context.Context, pk *packet.ICMP) (context.Context, trace.Span) { +func (pr *packetResponder) AddTraceContext(tracedCtx *tracing.TracedContext, serializedIdentity []byte) { + pr.tracedCtx = tracedCtx + pr.serializedIdentity = serializedIdentity +} + +func (pr *packetResponder) RequestSpan(ctx context.Context, pk *packet.ICMP) (context.Context, trace.Span) { if !pr.tracingEnabled() { return ctx, tracing.NewNoopSpan() } @@ -157,14 +165,14 @@ func (pr *packetResponder) requestSpan(ctx context.Context, pk *packet.ICMP) (co )) } -func (pr *packetResponder) replySpan(ctx context.Context, logger *zerolog.Logger) (context.Context, trace.Span) { +func (pr *packetResponder) ReplySpan(ctx context.Context, logger *zerolog.Logger) (context.Context, trace.Span) { if !pr.tracingEnabled() || pr.hadReply { return ctx, tracing.NewNoopSpan() } return pr.tracedCtx.Tracer().Start(pr.tracedCtx, "icmp-echo-reply") } -func (pr *packetResponder) exportSpan() { +func (pr 
*packetResponder) ExportSpan() { if !pr.tracingEnabled() { return } diff --git a/ingress/packet_router_test.go b/ingress/packet_router_test.go index 403a2274..77ab5e89 100644 --- a/ingress/packet_router_test.go +++ b/ingress/packet_router_test.go @@ -19,16 +19,17 @@ import ( ) var ( - packetConfig = &GlobalRouterConfig{ - ICMPRouter: nil, - IPv4Src: netip.MustParseAddr("172.16.0.1"), - IPv6Src: netip.MustParseAddr("fd51:2391:523:f4ee::1"), + defaultRouter = &icmpRouter{ + ipv4Proxy: nil, + ipv4Src: netip.MustParseAddr("172.16.0.1"), + ipv6Proxy: nil, + ipv6Src: netip.MustParseAddr("fd51:2391:523:f4ee::1"), } ) func TestRouterReturnTTLExceed(t *testing.T) { muxer := newMockMuxer(0) - router := NewPacketRouter(packetConfig, muxer, &noopLogger) + router := NewPacketRouter(defaultRouter, muxer, 0, &noopLogger) ctx, cancel := context.WithCancel(context.Background()) routerStopped := make(chan struct{}) go func() { @@ -53,7 +54,7 @@ func TestRouterReturnTTLExceed(t *testing.T) { }, }, } - assertTTLExceed(t, &pk, router.globalConfig.IPv4Src, muxer) + assertTTLExceed(t, &pk, defaultRouter.ipv4Src, muxer) pk = packet.ICMP{ IP: &packet.IP{ Src: netip.MustParseAddr("fd51:2391:523:f4ee::1"), @@ -71,7 +72,7 @@ func TestRouterReturnTTLExceed(t *testing.T) { }, }, } - assertTTLExceed(t, &pk, router.globalConfig.IPv6Src, muxer) + assertTTLExceed(t, &pk, defaultRouter.ipv6Src, muxer) cancel() <-routerStopped diff --git a/logger/configuration.go b/logger/configuration.go index 79dc6220..d406a220 100644 --- a/logger/configuration.go +++ b/logger/configuration.go @@ -76,10 +76,10 @@ func CreateConfig( var file *FileConfig var rolling *RollingConfig - if rollingLogPath != "" { - rolling = createRollingConfig(rollingLogPath) - } else if nonRollingLogFilePath != "" { + if nonRollingLogFilePath != "" { file = createFileConfig(nonRollingLogFilePath) + } else if rollingLogPath != "" { + rolling = createRollingConfig(rollingLogPath) } if minLevel == "" { diff --git a/metrics/metrics.go 
b/metrics/metrics.go index 1759451e..2a4fe993 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -10,10 +10,13 @@ import ( "sync" "time" + "github.com/facebookgo/grace/gracenet" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" "github.com/rs/zerolog" "golang.org/x/net/trace" + + "github.com/cloudflare/cloudflared/diagnostic" ) const ( @@ -21,8 +24,37 @@ const ( defaultShutdownTimeout = time.Second * 15 ) +// This variable is set at compile time to allow the default local address to change. +var Runtime = "host" + +func GetMetricsDefaultAddress(runtimeType string) string { + // When issuing the diagnostic command we may have to reach a server that is + // running in a virtual environment and in that case we must bind to 0.0.0.0 + // otherwise the server won't be reachable. + switch runtimeType { + case "virtual": + return "0.0.0.0:0" + default: + return "localhost:0" + } +} + +// GetMetricsKnownAddresses returns the addresses used by the metrics server to bind at +// startup time to allow a semi-deterministic approach to know where the server is listening at. +// The ports were selected because at the time we are in 2024 and they do not collide with any +// know/registered port according https://en.wikipedia.org/wiki/List_of_TCP_and_UDP_port_numbers. 
+func GetMetricsKnownAddresses(runtimeType string) []string { + switch runtimeType { + case "virtual": + return []string{"0.0.0.0:20241", "0.0.0.0:20242", "0.0.0.0:20243", "0.0.0.0:20244", "0.0.0.0:20245"} + default: + return []string{"localhost:20241", "localhost:20242", "localhost:20243", "localhost:20244", "localhost:20245"} + } +} + type Config struct { ReadyServer *ReadyServer + DiagnosticHandler *diagnostic.Handler QuickTunnelHostname string Orchestrator orchestrator @@ -62,9 +94,47 @@ func newMetricsHandler( }) } + config.DiagnosticHandler.InstallEndpoints(router) + return router } +// CreateMetricsListener will create a new [net.Listener] by using an +// known set of ports when the default address is passed with the fallback +// of choosing a random port when none is available. +// +// In case the provided address is not the default one then it will be used +// as is. +func CreateMetricsListener(listeners *gracenet.Net, laddr string) (net.Listener, error) { + if laddr == GetMetricsDefaultAddress(Runtime) { + // On the presence of the default address select + // a port from the known set of addresses iteratively. 
+ addresses := GetMetricsKnownAddresses(Runtime) + for _, address := range addresses { + listener, err := listeners.Listen("tcp", address) + if err == nil { + return listener, nil + } + } + + // When no port is available then bind to a random one + listener, err := listeners.Listen("tcp", laddr) + if err != nil { + return nil, fmt.Errorf("failed to listen to default metrics address: %w", err) + } + + return listener, nil + } + + // Explicitly got a local address then bind to it + listener, err := listeners.Listen("tcp", laddr) + if err != nil { + return nil, fmt.Errorf("failed to bind to address (%s): %w", laddr, err) + } + + return listener, nil +} + func ServeMetrics( l net.Listener, ctx context.Context, diff --git a/metrics/metrics_test.go b/metrics/metrics_test.go new file mode 100644 index 00000000..849076d7 --- /dev/null +++ b/metrics/metrics_test.go @@ -0,0 +1,52 @@ +package metrics_test + +import ( + "testing" + + "github.com/facebookgo/grace/gracenet" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/cloudflare/cloudflared/metrics" +) + +func TestMetricsListenerCreation(t *testing.T) { + t.Parallel() + listeners := gracenet.Net{} + listener1, err := metrics.CreateMetricsListener(&listeners, metrics.GetMetricsDefaultAddress("host")) + assert.Equal(t, "127.0.0.1:20241", listener1.Addr().String()) + require.NoError(t, err) + listener2, err := metrics.CreateMetricsListener(&listeners, metrics.GetMetricsDefaultAddress("host")) + assert.Equal(t, "127.0.0.1:20242", listener2.Addr().String()) + require.NoError(t, err) + listener3, err := metrics.CreateMetricsListener(&listeners, metrics.GetMetricsDefaultAddress("host")) + assert.Equal(t, "127.0.0.1:20243", listener3.Addr().String()) + require.NoError(t, err) + listener4, err := metrics.CreateMetricsListener(&listeners, metrics.GetMetricsDefaultAddress("host")) + assert.Equal(t, "127.0.0.1:20244", listener4.Addr().String()) + require.NoError(t, err) + listener5, err := 
metrics.CreateMetricsListener(&listeners, metrics.GetMetricsDefaultAddress("host")) + assert.Equal(t, "127.0.0.1:20245", listener5.Addr().String()) + require.NoError(t, err) + listener6, err := metrics.CreateMetricsListener(&listeners, metrics.GetMetricsDefaultAddress("host")) + addresses := [5]string{"127.0.0.1:20241", "127.0.0.1:20242", "127.0.0.1:20243", "127.0.0.1:20244", "127.0.0.1:20245"} + assert.NotContains(t, addresses, listener6.Addr().String()) + require.NoError(t, err) + listener7, err := metrics.CreateMetricsListener(&listeners, "localhost:12345") + assert.Equal(t, "127.0.0.1:12345", listener7.Addr().String()) + require.NoError(t, err) + err = listener1.Close() + require.NoError(t, err) + err = listener2.Close() + require.NoError(t, err) + err = listener3.Close() + require.NoError(t, err) + err = listener4.Close() + require.NoError(t, err) + err = listener5.Close() + require.NoError(t, err) + err = listener6.Close() + require.NoError(t, err) + err = listener7.Close() + require.NoError(t, err) +} diff --git a/metrics/readiness.go b/metrics/readiness.go index b4e2025d..0e5124f1 100644 --- a/metrics/readiness.go +++ b/metrics/readiness.go @@ -6,9 +6,7 @@ import ( "net/http" "github.com/google/uuid" - "github.com/rs/zerolog" - conn "github.com/cloudflare/cloudflared/connection" "github.com/cloudflare/cloudflared/tunnelstate" ) @@ -19,17 +17,16 @@ type ReadyServer struct { } // NewReadyServer initializes a ReadyServer and starts listening for dis/connection events. 
-func NewReadyServer(log *zerolog.Logger, clientID uuid.UUID) *ReadyServer { +func NewReadyServer( + clientID uuid.UUID, + tracker *tunnelstate.ConnTracker, +) *ReadyServer { return &ReadyServer{ - clientID: clientID, - tracker: tunnelstate.NewConnTracker(log), + clientID, + tracker, } } -func (rs *ReadyServer) OnTunnelEvent(c conn.Event) { - rs.tracker.OnTunnelEvent(c) -} - type body struct { Status int `json:"status"` ReadyConnections uint `json:"readyConnections"` diff --git a/metrics/readiness_test.go b/metrics/readiness_test.go index 8e035f85..240f171e 100644 --- a/metrics/readiness_test.go +++ b/metrics/readiness_test.go @@ -1,136 +1,106 @@ -package metrics +package metrics_test import ( + "encoding/json" "net/http" + "net/http/httptest" "testing" "github.com/google/uuid" "github.com/rs/zerolog" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/cloudflare/cloudflared/connection" + "github.com/cloudflare/cloudflared/metrics" "github.com/cloudflare/cloudflared/tunnelstate" ) -func TestReadyServer_makeResponse(t *testing.T) { - type fields struct { - isConnected map[uint8]tunnelstate.ConnectionInfo - } - tests := []struct { - name string - fields fields - wantOK bool - wantReadyConnections uint - }{ - { - name: "One connection online => HTTP 200", - fields: fields{ - isConnected: map[uint8]tunnelstate.ConnectionInfo{ - 0: {IsConnected: false}, - 1: {IsConnected: false}, - 2: {IsConnected: true}, - 3: {IsConnected: false}, - }, - }, - wantOK: true, - wantReadyConnections: 1, - }, - { - name: "No connections online => no HTTP 200", - fields: fields{ - isConnected: map[uint8]tunnelstate.ConnectionInfo{ - 0: {IsConnected: false}, - 1: {IsConnected: false}, - 2: {IsConnected: false}, - 3: {IsConnected: false}, - }, - }, - wantReadyConnections: 0, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - rs := &ReadyServer{ - tracker: tunnelstate.MockedConnTracker(tt.fields.isConnected), - } - gotStatusCode, 
gotReadyConnections := rs.makeResponse() - if tt.wantOK && gotStatusCode != http.StatusOK { - t.Errorf("ReadyServer.makeResponse() gotStatusCode = %v, want ok = %v", gotStatusCode, tt.wantOK) - } - if gotReadyConnections != tt.wantReadyConnections { - t.Errorf("ReadyServer.makeResponse() gotReadyConnections = %v, want %v", gotReadyConnections, tt.wantReadyConnections) - } - }) +func mockRequest(t *testing.T, readyServer *metrics.ReadyServer) (int, uint) { + t.Helper() + + var readyreadyConnections struct { + Status int `json:"status"` + ReadyConnections uint `json:"readyConnections"` + ConnectorID uuid.UUID `json:"connectorId"` } + rec := httptest.NewRecorder() + readyServer.ServeHTTP(rec, nil) + + decoder := json.NewDecoder(rec.Body) + err := decoder.Decode(&readyreadyConnections) + require.NoError(t, err) + return rec.Code, readyreadyConnections.ReadyConnections } func TestReadinessEventHandling(t *testing.T) { nopLogger := zerolog.Nop() - rs := NewReadyServer(&nopLogger, uuid.Nil) + tracker := tunnelstate.NewConnTracker(&nopLogger) + rs := metrics.NewReadyServer(uuid.Nil, tracker) // start not ok - code, ready := rs.makeResponse() + code, readyConnections := mockRequest(t, rs) assert.NotEqualValues(t, http.StatusOK, code) - assert.Zero(t, ready) + assert.Zero(t, readyConnections) // one connected => ok - rs.OnTunnelEvent(connection.Event{ + tracker.OnTunnelEvent(connection.Event{ Index: 1, EventType: connection.Connected, }) - code, ready = rs.makeResponse() + code, readyConnections = mockRequest(t, rs) assert.EqualValues(t, http.StatusOK, code) - assert.EqualValues(t, 1, ready) + assert.EqualValues(t, 1, readyConnections) // another connected => still ok - rs.OnTunnelEvent(connection.Event{ + tracker.OnTunnelEvent(connection.Event{ Index: 2, EventType: connection.Connected, }) - code, ready = rs.makeResponse() + code, readyConnections = mockRequest(t, rs) assert.EqualValues(t, http.StatusOK, code) - assert.EqualValues(t, 2, ready) + assert.EqualValues(t, 2, 
readyConnections) // one reconnecting => still ok - rs.OnTunnelEvent(connection.Event{ + tracker.OnTunnelEvent(connection.Event{ Index: 2, EventType: connection.Reconnecting, }) - code, ready = rs.makeResponse() + code, readyConnections = mockRequest(t, rs) assert.EqualValues(t, http.StatusOK, code) - assert.EqualValues(t, 1, ready) + assert.EqualValues(t, 1, readyConnections) // Regression test for TUN-3777 - rs.OnTunnelEvent(connection.Event{ + tracker.OnTunnelEvent(connection.Event{ Index: 1, EventType: connection.RegisteringTunnel, }) - code, ready = rs.makeResponse() + code, readyConnections = mockRequest(t, rs) assert.NotEqualValues(t, http.StatusOK, code) - assert.Zero(t, ready) + assert.Zero(t, readyConnections) // other connected then unregistered => not ok - rs.OnTunnelEvent(connection.Event{ + tracker.OnTunnelEvent(connection.Event{ Index: 1, EventType: connection.Connected, }) - code, ready = rs.makeResponse() + code, readyConnections = mockRequest(t, rs) assert.EqualValues(t, http.StatusOK, code) - assert.EqualValues(t, 1, ready) - rs.OnTunnelEvent(connection.Event{ + assert.EqualValues(t, 1, readyConnections) + tracker.OnTunnelEvent(connection.Event{ Index: 1, EventType: connection.Unregistering, }) - code, ready = rs.makeResponse() + code, readyConnections = mockRequest(t, rs) assert.NotEqualValues(t, http.StatusOK, code) - assert.Zero(t, ready) + assert.Zero(t, readyConnections) // other disconnected => not ok - rs.OnTunnelEvent(connection.Event{ + tracker.OnTunnelEvent(connection.Event{ Index: 1, EventType: connection.Disconnected, }) - code, ready = rs.makeResponse() + code, readyConnections = mockRequest(t, rs) assert.NotEqualValues(t, http.StatusOK, code) - assert.Zero(t, ready) + assert.Zero(t, readyConnections) } diff --git a/packet/decoder_test.go b/packet/decoder_test.go index b8770d74..4dc96886 100644 --- a/packet/decoder_test.go +++ b/packet/decoder_test.go @@ -254,3 +254,18 @@ func (u *UDP) EncodeLayers() ([]gopacket.SerializableLayer, 
error) { udpLayer.SetNetworkLayerForChecksum(ipLayers[0].(gopacket.NetworkLayer)) return append(ipLayers, &udpLayer), nil } + +func FuzzIPDecoder(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + ipDecoder := NewIPDecoder() + ipDecoder.Decode(RawPacket{Data: data}) + + }) +} + +func FuzzICMPDecoder(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + icmpDecoder := NewICMPDecoder() + icmpDecoder.Decode(RawPacket{Data: data}) + }) +} diff --git a/packet/encoder.go b/packet/encoder.go index 906d2b6d..09859f48 100644 --- a/packet/encoder.go +++ b/packet/encoder.go @@ -1,6 +1,8 @@ package packet -import "github.com/google/gopacket" +import ( + "github.com/google/gopacket" +) var ( serializeOpts = gopacket.SerializeOptions{ @@ -25,7 +27,7 @@ func NewEncoder() *Encoder { } } -func (e Encoder) Encode(packet Packet) (RawPacket, error) { +func (e *Encoder) Encode(packet Packet) (RawPacket, error) { encodedLayers, err := packet.EncodeLayers() if err != nil { return RawPacket{}, err diff --git a/proxy/logger.go b/proxy/logger.go index 1bde18e1..fa202964 100644 --- a/proxy/logger.go +++ b/proxy/logger.go @@ -16,11 +16,14 @@ const ( logFieldLBProbe = "lbProbe" logFieldRule = "ingressRule" logFieldOriginService = "originService" - logFieldFlowID = "flowID" logFieldConnIndex = "connIndex" logFieldDestAddr = "destAddr" ) +var ( + LogFieldFlowID = "flowID" +) + // newHTTPLogger creates a child zerolog.Logger from the provided with added context from the HTTP request, ingress // services, and connection index. func newHTTPLogger(logger *zerolog.Logger, connIndex uint8, req *http.Request, rule int, serviceName string) zerolog.Logger { @@ -47,7 +50,7 @@ func newTCPLogger(logger *zerolog.Logger, req *connection.TCPRequest) zerolog.Lo Int(management.EventTypeKey, int(management.TCP)). Uint8(logFieldConnIndex, req.ConnIndex). Str(logFieldOriginService, ingress.ServiceWarpRouting). - Str(logFieldFlowID, req.FlowID). + Str(LogFieldFlowID, req.FlowID). 
Str(logFieldDestAddr, req.Dest). Uint8(logFieldConnIndex, req.ConnIndex). Logger() diff --git a/quic/v3/datagram.go b/quic/v3/datagram.go new file mode 100644 index 00000000..136f8fbc --- /dev/null +++ b/quic/v3/datagram.go @@ -0,0 +1,430 @@ +package v3 + +import ( + "encoding/binary" + "net/netip" + "time" +) + +type DatagramType byte + +const ( + // UDP Registration + UDPSessionRegistrationType DatagramType = 0x0 + // UDP Session Payload + UDPSessionPayloadType DatagramType = 0x1 + // DatagramTypeICMP (supporting both ICMPv4 and ICMPv6) + ICMPType DatagramType = 0x2 + // UDP Session Registration Response + UDPSessionRegistrationResponseType DatagramType = 0x3 +) + +const ( + // Total number of bytes representing the [DatagramType] + datagramTypeLen = 1 + + // 1280 is the default datagram packet length used before MTU discovery: https://github.com/quic-go/quic-go/blob/v0.45.0/internal/protocol/params.go#L12 + maxDatagramPayloadLen = 1280 +) + +func ParseDatagramType(data []byte) (DatagramType, error) { + if len(data) < datagramTypeLen { + return 0, ErrDatagramHeaderTooSmall + } + return DatagramType(data[0]), nil +} + +// UDPSessionRegistrationDatagram handles a request to initialize a UDP session on the remote client. +type UDPSessionRegistrationDatagram struct { + RequestID RequestID + Dest netip.AddrPort + Traced bool + IdleDurationHint time.Duration + Payload []byte +} + +const ( + sessionRegistrationFlagsIPMask byte = 0b0000_0001 + sessionRegistrationFlagsTracedMask byte = 0b0000_0010 + sessionRegistrationFlagsBundledMask byte = 0b0000_0100 + + sessionRegistrationIPv4DatagramHeaderLen = datagramTypeLen + + 1 + // Flag length + 2 + // Destination port length + 2 + // Idle duration seconds length + datagramRequestIdLen + // Request ID length + 4 // IPv4 address length + + // The IPv4 and IPv6 address share space, so adding 12 to the header length gets the space taken by the IPv6 field. 
+ sessionRegistrationIPv6DatagramHeaderLen = sessionRegistrationIPv4DatagramHeaderLen + 12 +) + +// The datagram structure for UDPSessionRegistrationDatagram is: +// +// 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// 0| Type | Flags | Destination Port | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// 4| Idle Duration Seconds | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +// 8| | +// + Session Identifier + +// 12| (16 Bytes) | +// + + +// 16| | +// + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// 20| | Destination IPv4 Address | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+- - - - - - - - - - - - - - - -+ +// 24| Destination IPv4 Address cont | | +// +- - - - - - - - - - - - - - - - + +// 28| Destination IPv6 Address | +// + (extension of IPv4 region) + +// 32| | +// + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// 36| | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +// . . +// . Bundle Payload . +// . . 
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +func (s *UDPSessionRegistrationDatagram) MarshalBinary() (data []byte, err error) { + ipv6 := s.Dest.Addr().Is6() + var flags byte + if s.Traced { + flags |= sessionRegistrationFlagsTracedMask + } + hasPayload := len(s.Payload) > 0 + if hasPayload { + flags |= sessionRegistrationFlagsBundledMask + } + var maxPayloadLen int + if ipv6 { + maxPayloadLen = maxDatagramPayloadLen + sessionRegistrationIPv6DatagramHeaderLen + flags |= sessionRegistrationFlagsIPMask + } else { + maxPayloadLen = maxDatagramPayloadLen + sessionRegistrationIPv4DatagramHeaderLen + } + // Make sure that the payload being bundled can actually fit in the payload destination + if len(s.Payload) > maxPayloadLen { + return nil, wrapMarshalErr(ErrDatagramPayloadTooLarge) + } + // Allocate the buffer with the right size for the destination IP family + if ipv6 { + data = make([]byte, sessionRegistrationIPv6DatagramHeaderLen+len(s.Payload)) + } else { + data = make([]byte, sessionRegistrationIPv4DatagramHeaderLen+len(s.Payload)) + } + data[0] = byte(UDPSessionRegistrationType) + data[1] = byte(flags) + binary.BigEndian.PutUint16(data[2:4], s.Dest.Port()) + binary.BigEndian.PutUint16(data[4:6], uint16(s.IdleDurationHint.Seconds())) + err = s.RequestID.MarshalBinaryTo(data[6:22]) + if err != nil { + return nil, wrapMarshalErr(err) + } + var end int + if ipv6 { + copy(data[22:38], s.Dest.Addr().AsSlice()) + end = 38 + } else { + copy(data[22:26], s.Dest.Addr().AsSlice()) + end = 26 + } + + if hasPayload { + copy(data[end:], s.Payload) + } + + return data, nil +} + +func (s *UDPSessionRegistrationDatagram) UnmarshalBinary(data []byte) error { + datagramType, err := ParseDatagramType(data) + if err != nil { + return err + } + if datagramType != UDPSessionRegistrationType { + return wrapUnmarshalErr(ErrInvalidDatagramType) + } + + requestID, err := RequestIDFromSlice(data[6:22]) + if err != nil { + return wrapUnmarshalErr(err) + } + + 
traced := (data[1] & sessionRegistrationFlagsTracedMask) == sessionRegistrationFlagsTracedMask + bundled := (data[1] & sessionRegistrationFlagsBundledMask) == sessionRegistrationFlagsBundledMask + ipv6 := (data[1] & sessionRegistrationFlagsIPMask) == sessionRegistrationFlagsIPMask + + port := binary.BigEndian.Uint16(data[2:4]) + var datagramHeaderSize int + var dest netip.AddrPort + if ipv6 { + datagramHeaderSize = sessionRegistrationIPv6DatagramHeaderLen + dest = netip.AddrPortFrom(netip.AddrFrom16([16]byte(data[22:38])), port) + } else { + datagramHeaderSize = sessionRegistrationIPv4DatagramHeaderLen + dest = netip.AddrPortFrom(netip.AddrFrom4([4]byte(data[22:26])), port) + } + + idle := time.Duration(binary.BigEndian.Uint16(data[4:6])) * time.Second + + var payload []byte + if bundled && len(data) >= datagramHeaderSize && len(data[datagramHeaderSize:]) > 0 { + payload = data[datagramHeaderSize:] + } + + *s = UDPSessionRegistrationDatagram{ + RequestID: requestID, + Dest: dest, + Traced: traced, + IdleDurationHint: idle, + Payload: payload, + } + return nil +} + +// UDPSessionPayloadDatagram provides the payload for a session to be send to either the origin or the client. +type UDPSessionPayloadDatagram struct { + RequestID RequestID + Payload []byte +} + +const ( + DatagramPayloadHeaderLen = datagramTypeLen + datagramRequestIdLen + + // The maximum size that a proxied UDP payload can be in a [UDPSessionPayloadDatagram] + maxPayloadPlusHeaderLen = maxDatagramPayloadLen + DatagramPayloadHeaderLen +) + +// The datagram structure for UDPSessionPayloadDatagram is: +// +// 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// 0| Type | | +// +-+-+-+-+-+-+-+-+ + +// 4| | +// + + +// 8| Session Identifier | +// + (16 Bytes) + +// 12| | +// + +-+-+-+-+-+-+-+-+ +// 16| | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +// . . +// . Payload . +// . . 
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +// MarshalPayloadHeaderTo provides a way to insert the Session Payload header into an already existing byte slice +// without having to allocate and copy the payload into the destination. +// +// This method should be used in-place of MarshalBinary which will allocate in-place the required byte array to return. +func MarshalPayloadHeaderTo(requestID RequestID, payload []byte) error { + if len(payload) < DatagramPayloadHeaderLen { + return wrapMarshalErr(ErrDatagramPayloadHeaderTooSmall) + } + payload[0] = byte(UDPSessionPayloadType) + return requestID.MarshalBinaryTo(payload[1:DatagramPayloadHeaderLen]) +} + +func (s *UDPSessionPayloadDatagram) UnmarshalBinary(data []byte) error { + datagramType, err := ParseDatagramType(data) + if err != nil { + return err + } + if datagramType != UDPSessionPayloadType { + return wrapUnmarshalErr(ErrInvalidDatagramType) + } + + // Make sure that the slice provided is the right size to be parsed. + if len(data) < DatagramPayloadHeaderLen || len(data) > maxPayloadPlusHeaderLen { + return wrapUnmarshalErr(ErrDatagramPayloadInvalidSize) + } + + requestID, err := RequestIDFromSlice(data[1:DatagramPayloadHeaderLen]) + if err != nil { + return wrapUnmarshalErr(err) + } + + *s = UDPSessionPayloadDatagram{ + RequestID: requestID, + Payload: data[DatagramPayloadHeaderLen:], + } + return nil +} + +// UDPSessionRegistrationResponseDatagram is used to either return a successful registration or error to the client +// that requested the registration of a UDP session. 
+type UDPSessionRegistrationResponseDatagram struct { + RequestID RequestID + ResponseType SessionRegistrationResp + ErrorMsg string +} + +const ( + datagramRespTypeLen = 1 + datagramRespErrMsgLen = 2 + + datagramSessionRegistrationResponseLen = datagramTypeLen + datagramRespTypeLen + datagramRequestIdLen + datagramRespErrMsgLen + + // The maximum size that an error message can be in a [UDPSessionRegistrationResponseDatagram]. + maxResponseErrorMessageLen = maxDatagramPayloadLen - datagramSessionRegistrationResponseLen +) + +// SessionRegistrationResp represents all of the responses that a UDP session registration response +// can return back to the client. +type SessionRegistrationResp byte + +const ( + // Session was received and is ready to proxy. + ResponseOk SessionRegistrationResp = 0x00 + // Session registration was unable to reach the requested origin destination. + ResponseDestinationUnreachable SessionRegistrationResp = 0x01 + // Session registration was unable to bind to a local UDP socket. + ResponseUnableToBindSocket SessionRegistrationResp = 0x02 + // Session registration failed with an unexpected error but provided a message. + ResponseErrorWithMsg SessionRegistrationResp = 0xff +) + +// The datagram structure for UDPSessionRegistrationResponseDatagram is: +// +// 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// 0| Type | Resp Type | | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +// 4| | +// + Session Identifier + +// 8| (16 Bytes) | +// + + +// 12| | +// + +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// 16| | Error Length | +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// . . +// . . +// . . 
+// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +func (s *UDPSessionRegistrationResponseDatagram) MarshalBinary() (data []byte, err error) { + if len(s.ErrorMsg) > maxResponseErrorMessageLen { + return nil, wrapMarshalErr(ErrDatagramResponseMsgInvalidSize) + } + errMsgLen := uint16(len(s.ErrorMsg)) + + data = make([]byte, datagramSessionRegistrationResponseLen+errMsgLen) + data[0] = byte(UDPSessionRegistrationResponseType) + data[1] = byte(s.ResponseType) + err = s.RequestID.MarshalBinaryTo(data[2:18]) + if err != nil { + return nil, wrapMarshalErr(err) + } + + if errMsgLen > 0 { + binary.BigEndian.PutUint16(data[18:20], errMsgLen) + copy(data[20:], []byte(s.ErrorMsg)) + } + + return data, nil +} + +func (s *UDPSessionRegistrationResponseDatagram) UnmarshalBinary(data []byte) error { + datagramType, err := ParseDatagramType(data) + if err != nil { + return wrapUnmarshalErr(err) + } + if datagramType != UDPSessionRegistrationResponseType { + return wrapUnmarshalErr(ErrInvalidDatagramType) + } + + if len(data) < datagramSessionRegistrationResponseLen { + return wrapUnmarshalErr(ErrDatagramResponseInvalidSize) + } + + respType := SessionRegistrationResp(data[1]) + + requestID, err := RequestIDFromSlice(data[2:18]) + if err != nil { + return wrapUnmarshalErr(err) + } + + errMsgLen := binary.BigEndian.Uint16(data[18:20]) + if errMsgLen > maxResponseErrorMessageLen { + return wrapUnmarshalErr(ErrDatagramResponseMsgTooLargeMaximum) + } + + if len(data[20:]) < int(errMsgLen) { + return wrapUnmarshalErr(ErrDatagramResponseMsgTooLargeDatagram) + } + + var errMsg string + if errMsgLen > 0 { + errMsg = string(data[20:]) + } + + *s = UDPSessionRegistrationResponseDatagram{ + RequestID: requestID, + ResponseType: respType, + ErrorMsg: errMsg, + } + return nil +} + +// ICMPDatagram is used to propagate ICMPv4 and ICMPv6 payloads. +type ICMPDatagram struct { + Payload []byte +} + +// The maximum size that an ICMP packet can be. 
+const maxICMPPayloadLen = maxDatagramPayloadLen + +// The datagram structure for ICMPDatagram is: +// +// 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 0 1 2 3 4 5 6 7 +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ +// 0| Type | | +// +-+-+-+-+-+-+-+-+ + +// . Payload . +// . . +// . . +// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ + +func (d *ICMPDatagram) MarshalBinary() (data []byte, err error) { + if len(d.Payload) > maxICMPPayloadLen { + return nil, wrapMarshalErr(ErrDatagramICMPPayloadTooLarge) + } + // We shouldn't attempt to marshal an ICMP datagram with no ICMP payload provided + if len(d.Payload) == 0 { + return nil, wrapMarshalErr(ErrDatagramICMPPayloadMissing) + } + // Make room for the 1 byte ICMPType header + datagram := make([]byte, len(d.Payload)+datagramTypeLen) + datagram[0] = byte(ICMPType) + copy(datagram[1:], d.Payload) + return datagram, nil +} + +func (d *ICMPDatagram) UnmarshalBinary(data []byte) error { + datagramType, err := ParseDatagramType(data) + if err != nil { + return wrapUnmarshalErr(err) + } + if datagramType != ICMPType { + return wrapUnmarshalErr(ErrInvalidDatagramType) + } + + if len(data[1:]) > maxDatagramPayloadLen { + return wrapUnmarshalErr(ErrDatagramICMPPayloadTooLarge) + } + + // We shouldn't attempt to unmarshal an ICMP datagram with no ICMP payload provided + if len(data[1:]) == 0 { + return wrapUnmarshalErr(ErrDatagramICMPPayloadMissing) + } + + payload := make([]byte, len(data[1:])) + copy(payload, data[1:]) + d.Payload = payload + return nil +} diff --git a/quic/v3/datagram_errors.go b/quic/v3/datagram_errors.go new file mode 100644 index 00000000..cbe30abe --- /dev/null +++ b/quic/v3/datagram_errors.go @@ -0,0 +1,28 @@ +package v3 + +import ( + "errors" + "fmt" +) + +var ( + ErrInvalidDatagramType error = errors.New("invalid datagram type expected") + ErrDatagramHeaderTooSmall error = fmt.Errorf("datagram should have at least %d byte", datagramTypeLen) + 
ErrDatagramPayloadTooLarge error = errors.New("payload length is too large to be bundled in datagram") + ErrDatagramPayloadHeaderTooSmall error = errors.New("payload length is too small to fit the datagram header") + ErrDatagramPayloadInvalidSize error = errors.New("datagram provided is an invalid size") + ErrDatagramResponseMsgInvalidSize error = errors.New("datagram response message is an invalid size") + ErrDatagramResponseInvalidSize error = errors.New("datagram response is an invalid size") + ErrDatagramResponseMsgTooLargeMaximum error = fmt.Errorf("datagram response error message length exceeds the length of the datagram maximum: %d", maxResponseErrorMessageLen) + ErrDatagramResponseMsgTooLargeDatagram error = fmt.Errorf("datagram response error message length exceeds the length of the provided datagram") + ErrDatagramICMPPayloadTooLarge error = fmt.Errorf("datagram icmp payload exceeds %d bytes", maxICMPPayloadLen) + ErrDatagramICMPPayloadMissing error = errors.New("datagram icmp payload is missing") +) + +func wrapMarshalErr(err error) error { + return fmt.Errorf("datagram marshal error: %w", err) +} + +func wrapUnmarshalErr(err error) error { + return fmt.Errorf("datagram unmarshal error: %w", err) +} diff --git a/quic/v3/datagram_test.go b/quic/v3/datagram_test.go new file mode 100644 index 00000000..834c4ae4 --- /dev/null +++ b/quic/v3/datagram_test.go @@ -0,0 +1,449 @@ +package v3_test + +import ( + "encoding/binary" + "errors" + "net/netip" + "testing" + "time" + + "github.com/stretchr/testify/require" + + v3 "github.com/cloudflare/cloudflared/quic/v3" +) + +func makePayload(size int) []byte { + payload := make([]byte, size) + for i := range len(payload) { + payload[i] = 0xfc + } + return payload +} + +func TestSessionRegistration_MarshalUnmarshal(t *testing.T) { + payload := makePayload(1280) + tests := []*v3.UDPSessionRegistrationDatagram{ + // Default (IPv4) + { + RequestID: testRequestID, + Dest: netip.MustParseAddrPort("1.1.1.1:8080"), + Traced: 
false, + IdleDurationHint: 5 * time.Second, + Payload: nil, + }, + // Request ID (max) + { + RequestID: mustRequestID([16]byte{ + ^uint8(0), ^uint8(0), ^uint8(0), ^uint8(0), + ^uint8(0), ^uint8(0), ^uint8(0), ^uint8(0), + ^uint8(0), ^uint8(0), ^uint8(0), ^uint8(0), + ^uint8(0), ^uint8(0), ^uint8(0), ^uint8(0), + }), + Dest: netip.MustParseAddrPort("1.1.1.1:8080"), + Traced: false, + IdleDurationHint: 5 * time.Second, + Payload: nil, + }, + // IPv6 + { + RequestID: testRequestID, + Dest: netip.MustParseAddrPort("[fc00::0]:8080"), + Traced: false, + IdleDurationHint: 5 * time.Second, + Payload: nil, + }, + // Traced + { + RequestID: testRequestID, + Dest: netip.MustParseAddrPort("1.1.1.1:8080"), + Traced: true, + IdleDurationHint: 5 * time.Second, + Payload: nil, + }, + // IdleDurationHint (max) + { + RequestID: testRequestID, + Dest: netip.MustParseAddrPort("1.1.1.1:8080"), + Traced: false, + IdleDurationHint: 65535 * time.Second, + Payload: nil, + }, + // Payload + { + RequestID: testRequestID, + Dest: netip.MustParseAddrPort("1.1.1.1:8080"), + Traced: false, + IdleDurationHint: 5 * time.Second, + Payload: []byte{0xff, 0xaa, 0xcc, 0x44}, + }, + // Payload (max: 1254) for IPv4 + { + RequestID: testRequestID, + Dest: netip.MustParseAddrPort("1.1.1.1:8080"), + Traced: false, + IdleDurationHint: 5 * time.Second, + Payload: payload, + }, + // Payload (max: 1242) for IPv4 + { + RequestID: testRequestID, + Dest: netip.MustParseAddrPort("1.1.1.1:8080"), + Traced: false, + IdleDurationHint: 5 * time.Second, + Payload: payload[:1242], + }, + } + for _, tt := range tests { + marshaled, err := tt.MarshalBinary() + if err != nil { + t.Error(err) + } + unmarshaled := v3.UDPSessionRegistrationDatagram{} + err = unmarshaled.UnmarshalBinary(marshaled) + if err != nil { + t.Error(err) + } + if !compareRegistrationDatagrams(t, tt, &unmarshaled) { + t.Errorf("not equal:\n%+v\n%+v", tt, &unmarshaled) + } + } +} + +func TestSessionRegistration_MarshalBinary(t *testing.T) { + t.Run("idle 
hint too large", func(t *testing.T) { + // idle hint duration overflows back to 1 + datagram := &v3.UDPSessionRegistrationDatagram{ + RequestID: testRequestID, + Dest: netip.MustParseAddrPort("1.1.1.1:8080"), + Traced: false, + IdleDurationHint: 65537 * time.Second, + Payload: nil, + } + expected := &v3.UDPSessionRegistrationDatagram{ + RequestID: testRequestID, + Dest: netip.MustParseAddrPort("1.1.1.1:8080"), + Traced: false, + IdleDurationHint: 1 * time.Second, + Payload: nil, + } + marshaled, err := datagram.MarshalBinary() + if err != nil { + t.Error(err) + } + unmarshaled := v3.UDPSessionRegistrationDatagram{} + err = unmarshaled.UnmarshalBinary(marshaled) + if err != nil { + t.Error(err) + } + if !compareRegistrationDatagrams(t, expected, &unmarshaled) { + t.Errorf("not equal:\n%+v\n%+v", expected, &unmarshaled) + } + }) +} + +func TestTypeUnmarshalErrors(t *testing.T) { + t.Run("invalid length", func(t *testing.T) { + d1 := v3.UDPSessionRegistrationDatagram{} + err := d1.UnmarshalBinary([]byte{}) + if !errors.Is(err, v3.ErrDatagramHeaderTooSmall) { + t.Errorf("expected invalid length to throw error") + } + + d2 := v3.UDPSessionPayloadDatagram{} + err = d2.UnmarshalBinary([]byte{}) + if !errors.Is(err, v3.ErrDatagramHeaderTooSmall) { + t.Errorf("expected invalid length to throw error") + } + + d3 := v3.UDPSessionRegistrationResponseDatagram{} + err = d3.UnmarshalBinary([]byte{}) + if !errors.Is(err, v3.ErrDatagramHeaderTooSmall) { + t.Errorf("expected invalid length to throw error") + } + + d4 := v3.ICMPDatagram{} + err = d4.UnmarshalBinary([]byte{}) + if !errors.Is(err, v3.ErrDatagramHeaderTooSmall) { + t.Errorf("expected invalid length to throw error") + } + }) + + t.Run("invalid types", func(t *testing.T) { + d1 := v3.UDPSessionRegistrationDatagram{} + err := d1.UnmarshalBinary([]byte{byte(v3.UDPSessionRegistrationResponseType)}) + if !errors.Is(err, v3.ErrInvalidDatagramType) { + t.Errorf("expected invalid type to throw error") + } + + d2 := 
v3.UDPSessionPayloadDatagram{} + err = d2.UnmarshalBinary([]byte{byte(v3.UDPSessionRegistrationType)}) + if !errors.Is(err, v3.ErrInvalidDatagramType) { + t.Errorf("expected invalid type to throw error") + } + + d3 := v3.UDPSessionRegistrationResponseDatagram{} + err = d3.UnmarshalBinary([]byte{byte(v3.UDPSessionPayloadType)}) + if !errors.Is(err, v3.ErrInvalidDatagramType) { + t.Errorf("expected invalid type to throw error") + } + + d4 := v3.ICMPDatagram{} + err = d4.UnmarshalBinary([]byte{byte(v3.UDPSessionPayloadType)}) + if !errors.Is(err, v3.ErrInvalidDatagramType) { + t.Errorf("expected invalid type to throw error") + } + }) +} + +func TestSessionPayload(t *testing.T) { + t.Run("basic", func(t *testing.T) { + payload := makePayload(128) + err := v3.MarshalPayloadHeaderTo(testRequestID, payload[0:17]) + if err != nil { + t.Error(err) + } + unmarshaled := v3.UDPSessionPayloadDatagram{} + err = unmarshaled.UnmarshalBinary(payload) + if err != nil { + t.Error(err) + } + require.Equal(t, testRequestID, unmarshaled.RequestID) + require.Equal(t, payload[17:], unmarshaled.Payload) + }) + + t.Run("empty", func(t *testing.T) { + payload := makePayload(17) + err := v3.MarshalPayloadHeaderTo(testRequestID, payload) + if err != nil { + t.Error(err) + } + unmarshaled := v3.UDPSessionPayloadDatagram{} + err = unmarshaled.UnmarshalBinary(payload) + if err != nil { + t.Error(err) + } + require.Equal(t, testRequestID, unmarshaled.RequestID) + require.Equal(t, payload[17:], unmarshaled.Payload) + }) + + t.Run("header size too small", func(t *testing.T) { + payload := makePayload(16) + err := v3.MarshalPayloadHeaderTo(testRequestID, payload) + if !errors.Is(err, v3.ErrDatagramPayloadHeaderTooSmall) { + t.Errorf("expected an error") + } + }) + + t.Run("payload size too small", func(t *testing.T) { + payload := makePayload(17) + err := v3.MarshalPayloadHeaderTo(testRequestID, payload) + if err != nil { + t.Error(err) + } + unmarshaled := v3.UDPSessionPayloadDatagram{} + err = 
unmarshaled.UnmarshalBinary(payload[:16]) + if !errors.Is(err, v3.ErrDatagramPayloadInvalidSize) { + t.Errorf("expected an error: %s", err) + } + }) + + t.Run("payload size too large", func(t *testing.T) { + datagram := makePayload(17 + 1281) // 1280 is the largest payload size allowed + err := v3.MarshalPayloadHeaderTo(testRequestID, datagram) + if err != nil { + t.Error(err) + } + unmarshaled := v3.UDPSessionPayloadDatagram{} + err = unmarshaled.UnmarshalBinary(datagram[:]) + if !errors.Is(err, v3.ErrDatagramPayloadInvalidSize) { + t.Errorf("expected an error: %s", err) + } + }) +} + +func TestSessionRegistrationResponse(t *testing.T) { + validRespTypes := []v3.SessionRegistrationResp{ + v3.ResponseOk, + v3.ResponseDestinationUnreachable, + v3.ResponseUnableToBindSocket, + v3.ResponseErrorWithMsg, + } + t.Run("basic", func(t *testing.T) { + for _, responseType := range validRespTypes { + datagram := &v3.UDPSessionRegistrationResponseDatagram{ + RequestID: testRequestID, + ResponseType: responseType, + ErrorMsg: "test", + } + marshaled, err := datagram.MarshalBinary() + if err != nil { + t.Error(err) + } + unmarshaled := &v3.UDPSessionRegistrationResponseDatagram{} + err = unmarshaled.UnmarshalBinary(marshaled) + if err != nil { + t.Error(err) + } + require.Equal(t, datagram, unmarshaled) + } + }) + + t.Run("unsupported resp type is valid", func(t *testing.T) { + datagram := &v3.UDPSessionRegistrationResponseDatagram{ + RequestID: testRequestID, + ResponseType: v3.SessionRegistrationResp(0xfc), + ErrorMsg: "", + } + marshaled, err := datagram.MarshalBinary() + if err != nil { + t.Error(err) + } + unmarshaled := &v3.UDPSessionRegistrationResponseDatagram{} + err = unmarshaled.UnmarshalBinary(marshaled) + if err != nil { + t.Error(err) + } + require.Equal(t, datagram, unmarshaled) + }) + + t.Run("too small to unmarshal", func(t *testing.T) { + payload := makePayload(17) + payload[0] = byte(v3.UDPSessionRegistrationResponseType) + unmarshaled := 
&v3.UDPSessionRegistrationResponseDatagram{} + err := unmarshaled.UnmarshalBinary(payload) + if !errors.Is(err, v3.ErrDatagramResponseInvalidSize) { + t.Errorf("expected an error") + } + }) + + t.Run("error message too long", func(t *testing.T) { + message := "" + for i := 0; i < 1280; i++ { + message += "a" + } + datagram := &v3.UDPSessionRegistrationResponseDatagram{ + RequestID: testRequestID, + ResponseType: v3.SessionRegistrationResp(0xfc), + ErrorMsg: message, + } + _, err := datagram.MarshalBinary() + if !errors.Is(err, v3.ErrDatagramResponseMsgInvalidSize) { + t.Errorf("expected an error") + } + }) + + t.Run("error message too large to unmarshal", func(t *testing.T) { + payload := makePayload(1280) + payload[0] = byte(v3.UDPSessionRegistrationResponseType) + binary.BigEndian.PutUint16(payload[18:20], 1280) // larger than the datagram size could be + unmarshaled := &v3.UDPSessionRegistrationResponseDatagram{} + err := unmarshaled.UnmarshalBinary(payload) + if !errors.Is(err, v3.ErrDatagramResponseMsgTooLargeMaximum) { + t.Errorf("expected an error: %v", err) + } + }) + + t.Run("error message larger than provided buffer", func(t *testing.T) { + payload := makePayload(1000) + payload[0] = byte(v3.UDPSessionRegistrationResponseType) + binary.BigEndian.PutUint16(payload[18:20], 1001) // larger than the datagram size provided + unmarshaled := &v3.UDPSessionRegistrationResponseDatagram{} + err := unmarshaled.UnmarshalBinary(payload) + if !errors.Is(err, v3.ErrDatagramResponseMsgTooLargeDatagram) { + t.Errorf("expected an error: %v", err) + } + }) +} + +func TestICMPDatagram(t *testing.T) { + t.Run("basic", func(t *testing.T) { + payload := makePayload(128) + datagram := v3.ICMPDatagram{Payload: payload} + marshaled, err := datagram.MarshalBinary() + if err != nil { + t.Error(err) + } + unmarshaled := &v3.ICMPDatagram{} + err = unmarshaled.UnmarshalBinary(marshaled) + if err != nil { + t.Error(err) + } + require.Equal(t, payload, unmarshaled.Payload) + }) + + 
t.Run("payload size empty", func(t *testing.T) { + payload := []byte{} + datagram := v3.ICMPDatagram{Payload: payload} + _, err := datagram.MarshalBinary() + if !errors.Is(err, v3.ErrDatagramICMPPayloadMissing) { + t.Errorf("expected an error: %s", err) + } + payload = []byte{byte(v3.ICMPType)} + unmarshaled := &v3.ICMPDatagram{} + err = unmarshaled.UnmarshalBinary(payload) + if !errors.Is(err, v3.ErrDatagramICMPPayloadMissing) { + t.Errorf("expected an error: %s", err) + } + }) + + t.Run("payload size too large", func(t *testing.T) { + payload := makePayload(1280 + 1) // larger than the datagram size could be + datagram := v3.ICMPDatagram{Payload: payload} + _, err := datagram.MarshalBinary() + if !errors.Is(err, v3.ErrDatagramICMPPayloadTooLarge) { + t.Errorf("expected an error: %s", err) + } + payload = makePayload(1280 + 2) // larger than the datagram size could be + header + payload[0] = byte(v3.ICMPType) + unmarshaled := &v3.ICMPDatagram{} + err = unmarshaled.UnmarshalBinary(payload) + if !errors.Is(err, v3.ErrDatagramICMPPayloadTooLarge) { + t.Errorf("expected an error: %s", err) + } + }) +} + +func compareRegistrationDatagrams(t *testing.T, l *v3.UDPSessionRegistrationDatagram, r *v3.UDPSessionRegistrationDatagram) bool { + require.Equal(t, l.Payload, r.Payload) + return l.RequestID == r.RequestID && + l.Dest == r.Dest && + l.IdleDurationHint == r.IdleDurationHint && + l.Traced == r.Traced +} + +func FuzzRegistrationDatagram(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + unmarshaled := v3.UDPSessionRegistrationDatagram{} + err := unmarshaled.UnmarshalBinary(data) + if err == nil { + _, _ = unmarshaled.MarshalBinary() + } + }) +} + +func FuzzPayloadDatagram(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + unmarshaled := v3.UDPSessionPayloadDatagram{} + _ = unmarshaled.UnmarshalBinary(data) + }) +} + +func FuzzRegistrationResponseDatagram(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + unmarshaled := 
v3.UDPSessionRegistrationResponseDatagram{} + err := unmarshaled.UnmarshalBinary(data) + if err == nil { + _, _ = unmarshaled.MarshalBinary() + } + }) +} + +func FuzzICMPDatagram(f *testing.F) { + f.Fuzz(func(t *testing.T, data []byte) { + unmarshaled := v3.ICMPDatagram{} + err := unmarshaled.UnmarshalBinary(data) + if err == nil { + _, _ = unmarshaled.MarshalBinary() + } + }) +} diff --git a/quic/v3/icmp.go b/quic/v3/icmp.go new file mode 100644 index 00000000..e9e3cc01 --- /dev/null +++ b/quic/v3/icmp.go @@ -0,0 +1,52 @@ +package v3 + +import ( + "context" + + "github.com/rs/zerolog" + "go.opentelemetry.io/otel/trace" + + "github.com/cloudflare/cloudflared/ingress" + "github.com/cloudflare/cloudflared/packet" + "github.com/cloudflare/cloudflared/tracing" +) + +// packetResponder is an implementation of the [ingress.ICMPResponder] which provides the ICMP Flow manager the +// return path to return and ICMP Echo response back to the QUIC muxer. +type packetResponder struct { + datagramMuxer DatagramICMPWriter + connID uint8 +} + +func newPacketResponder(datagramMuxer DatagramICMPWriter, connID uint8) ingress.ICMPResponder { + return &packetResponder{ + datagramMuxer, + connID, + } +} + +func (pr *packetResponder) ConnectionIndex() uint8 { + return pr.connID +} + +func (pr *packetResponder) ReturnPacket(pk *packet.ICMP) error { + return pr.datagramMuxer.SendICMPPacket(pk) +} + +func (pr *packetResponder) AddTraceContext(tracedCtx *tracing.TracedContext, serializedIdentity []byte) { + // datagram v3 does not support tracing ICMP packets +} + +func (pr *packetResponder) RequestSpan(ctx context.Context, pk *packet.ICMP) (context.Context, trace.Span) { + // datagram v3 does not support tracing ICMP packets + return ctx, tracing.NewNoopSpan() +} + +func (pr *packetResponder) ReplySpan(ctx context.Context, logger *zerolog.Logger) (context.Context, trace.Span) { + // datagram v3 does not support tracing ICMP packets + return ctx, tracing.NewNoopSpan() +} + +func (pr 
*packetResponder) ExportSpan() { + // datagram v3 does not support tracing ICMP packets +} diff --git a/quic/v3/icmp_test.go b/quic/v3/icmp_test.go new file mode 100644 index 00000000..3189a571 --- /dev/null +++ b/quic/v3/icmp_test.go @@ -0,0 +1,45 @@ +package v3_test + +import ( + "context" + "testing" + + "github.com/cloudflare/cloudflared/ingress" + "github.com/cloudflare/cloudflared/packet" +) + +type noopICMPRouter struct{} + +func (noopICMPRouter) Request(ctx context.Context, pk *packet.ICMP, responder ingress.ICMPResponder) error { + return nil +} +func (noopICMPRouter) ConvertToTTLExceeded(pk *packet.ICMP, rawPacket packet.RawPacket) *packet.ICMP { + return nil +} + +type mockICMPRouter struct { + recv chan *packet.ICMP +} + +func newMockICMPRouter() *mockICMPRouter { + return &mockICMPRouter{ + recv: make(chan *packet.ICMP, 1), + } +} + +func (m *mockICMPRouter) Request(ctx context.Context, pk *packet.ICMP, responder ingress.ICMPResponder) error { + m.recv <- pk + return nil +} +func (mockICMPRouter) ConvertToTTLExceeded(pk *packet.ICMP, rawPacket packet.RawPacket) *packet.ICMP { + return packet.NewICMPTTLExceedPacket(pk.IP, rawPacket, testLocalAddr.AddrPort().Addr()) +} + +func assertICMPEqual(t *testing.T, expected *packet.ICMP, actual *packet.ICMP) { + if expected.Src != actual.Src { + t.Fatalf("Src address not equal: %+v\t%+v", expected, actual) + } + if expected.Dst != actual.Dst { + t.Fatalf("Dst address not equal: %+v\t%+v", expected, actual) + } +} diff --git a/quic/v3/manager.go b/quic/v3/manager.go new file mode 100644 index 00000000..f5b0667f --- /dev/null +++ b/quic/v3/manager.go @@ -0,0 +1,103 @@ +package v3 + +import ( + "errors" + "net" + "net/netip" + "sync" + + "github.com/rs/zerolog" +) + +var ( + // ErrSessionNotFound indicates that a session has not been registered yet for the request id. 
+ ErrSessionNotFound = errors.New("flow not found") + // ErrSessionBoundToOtherConn is returned when a registration already exists for a different connection. + ErrSessionBoundToOtherConn = errors.New("flow is in use by another connection") + // ErrSessionAlreadyRegistered is returned when a registration already exists for this connection. + ErrSessionAlreadyRegistered = errors.New("flow is already registered for this connection") +) + +type SessionManager interface { + // RegisterSession will register a new session if it does not already exist for the request ID. + // During new session creation, the session will also bind the UDP socket for the origin. + // If the session exists for a different connection, it will return [ErrSessionBoundToOtherConn]. + RegisterSession(request *UDPSessionRegistrationDatagram, conn DatagramConn) (Session, error) + // GetSession returns an active session if available for the provided connection. + // If the session does not exist, it will return [ErrSessionNotFound]. If the session exists for a different + // connection, it will return [ErrSessionBoundToOtherConn]. + GetSession(requestID RequestID) (Session, error) + // UnregisterSession will remove a session from the current session manager. It will attempt to close the session + // before removal. 
+ UnregisterSession(requestID RequestID) +} + +type DialUDP func(dest netip.AddrPort) (*net.UDPConn, error) + +type sessionManager struct { + sessions map[RequestID]Session + mutex sync.RWMutex + originDialer DialUDP + metrics Metrics + log *zerolog.Logger +} + +func NewSessionManager(metrics Metrics, log *zerolog.Logger, originDialer DialUDP) SessionManager { + return &sessionManager{ + sessions: make(map[RequestID]Session), + originDialer: originDialer, + metrics: metrics, + log: log, + } +} + +func (s *sessionManager) RegisterSession(request *UDPSessionRegistrationDatagram, conn DatagramConn) (Session, error) { + s.mutex.Lock() + defer s.mutex.Unlock() + // Check to make sure session doesn't already exist for requestID + if session, exists := s.sessions[request.RequestID]; exists { + if conn.ID() == session.ConnectionID() { + return nil, ErrSessionAlreadyRegistered + } + return nil, ErrSessionBoundToOtherConn + } + // Attempt to bind the UDP socket for the new session + origin, err := s.originDialer(request.Dest) + if err != nil { + return nil, err + } + // Create and insert the new session in the map + session := NewSession( + request.RequestID, + request.IdleDurationHint, + origin, + origin.RemoteAddr(), + origin.LocalAddr(), + conn, + s.metrics, + s.log) + s.sessions[request.RequestID] = session + return session, nil +} + +func (s *sessionManager) GetSession(requestID RequestID) (Session, error) { + s.mutex.RLock() + defer s.mutex.RUnlock() + session, exists := s.sessions[requestID] + if exists { + return session, nil + } + return nil, ErrSessionNotFound +} + +func (s *sessionManager) UnregisterSession(requestID RequestID) { + s.mutex.Lock() + defer s.mutex.Unlock() + // Get the session and make sure to close it if it isn't already closed + session, exists := s.sessions[requestID] + if exists { + // We ignore any errors when attempting to close the session + _ = session.Close() + } + delete(s.sessions, requestID) +} diff --git a/quic/v3/manager_test.go 
b/quic/v3/manager_test.go new file mode 100644 index 00000000..71defadd --- /dev/null +++ b/quic/v3/manager_test.go @@ -0,0 +1,80 @@ +package v3_test + +import ( + "errors" + "net/netip" + "strings" + "testing" + "time" + + "github.com/rs/zerolog" + + "github.com/cloudflare/cloudflared/ingress" + v3 "github.com/cloudflare/cloudflared/quic/v3" +) + +func TestRegisterSession(t *testing.T) { + log := zerolog.Nop() + manager := v3.NewSessionManager(&noopMetrics{}, &log, ingress.DialUDPAddrPort) + + request := v3.UDPSessionRegistrationDatagram{ + RequestID: testRequestID, + Dest: netip.MustParseAddrPort("127.0.0.1:5000"), + Traced: false, + IdleDurationHint: 5 * time.Second, + Payload: nil, + } + session, err := manager.RegisterSession(&request, &noopEyeball{}) + if err != nil { + t.Fatalf("register session should've succeeded: %v", err) + } + if request.RequestID != session.ID() { + t.Fatalf("session id doesn't match: %v != %v", request.RequestID, session.ID()) + } + + // We shouldn't be able to register another session with the same request id + _, err = manager.RegisterSession(&request, &noopEyeball{}) + if !errors.Is(err, v3.ErrSessionAlreadyRegistered) { + t.Fatalf("session is already registered for this connection: %v", err) + } + + // We shouldn't be able to register another session with the same request id for a different connection + _, err = manager.RegisterSession(&request, &noopEyeball{connID: 1}) + if !errors.Is(err, v3.ErrSessionBoundToOtherConn) { + t.Fatalf("session is already registered for a separate connection: %v", err) + } + + // Get session + sessionGet, err := manager.GetSession(request.RequestID) + if err != nil { + t.Fatalf("get session failed: %v", err) + } + if session.ID() != sessionGet.ID() { + t.Fatalf("session's do not match: %v != %v", session.ID(), sessionGet.ID()) + } + + // Remove the session + manager.UnregisterSession(request.RequestID) + + // Get session should fail + _, err = manager.GetSession(request.RequestID) + if 
!errors.Is(err, v3.ErrSessionNotFound) { + t.Fatalf("get session failed: %v", err) + } + + // Closing the original session should return that the socket is already closed (by the session unregistration) + err = session.Close() + if err != nil && !strings.Contains(err.Error(), "use of closed network connection") { + t.Fatalf("session should've closed without issue: %v", err) + } +} + +func TestGetSession_Empty(t *testing.T) { + log := zerolog.Nop() + manager := v3.NewSessionManager(&noopMetrics{}, &log, ingress.DialUDPAddrPort) + + _, err := manager.GetSession(testRequestID) + if !errors.Is(err, v3.ErrSessionNotFound) { + t.Fatalf("get session find no session: %v", err) + } +} diff --git a/quic/v3/metrics.go b/quic/v3/metrics.go new file mode 100644 index 00000000..8f9cf19e --- /dev/null +++ b/quic/v3/metrics.go @@ -0,0 +1,90 @@ +package v3 + +import ( + "github.com/prometheus/client_golang/prometheus" +) + +const ( + namespace = "cloudflared" + subsystem = "udp" +) + +type Metrics interface { + IncrementFlows() + DecrementFlows() + PayloadTooLarge() + RetryFlowResponse() + MigrateFlow() +} + +type metrics struct { + activeUDPFlows prometheus.Gauge + totalUDPFlows prometheus.Counter + payloadTooLarge prometheus.Counter + retryFlowResponses prometheus.Counter + migratedFlows prometheus.Counter +} + +func (m *metrics) IncrementFlows() { + m.totalUDPFlows.Inc() + m.activeUDPFlows.Inc() +} + +func (m *metrics) DecrementFlows() { + m.activeUDPFlows.Dec() +} + +func (m *metrics) PayloadTooLarge() { + m.payloadTooLarge.Inc() +} + +func (m *metrics) RetryFlowResponse() { + m.retryFlowResponses.Inc() +} + +func (m *metrics) MigrateFlow() { + m.migratedFlows.Inc() +} + +func NewMetrics(registerer prometheus.Registerer) Metrics { + m := &metrics{ + activeUDPFlows: prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "active_flows", + Help: "Concurrent count of UDP flows that are being proxied to any origin", + }), + totalUDPFlows: 
prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "total_flows", + Help: "Total count of UDP flows that have been proxied to any origin", + }), + payloadTooLarge: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "payload_too_large", + Help: "Total count of UDP flows that have had origin payloads that are too large to proxy", + }), + retryFlowResponses: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "retry_flow_responses", + Help: "Total count of UDP flows that have had to send their registration response more than once", + }), + migratedFlows: prometheus.NewCounter(prometheus.CounterOpts{ + Namespace: namespace, + Subsystem: subsystem, + Name: "migrated_flows", + Help: "Total count of UDP flows have been migrated across local connections", + }), + } + registerer.MustRegister( + m.activeUDPFlows, + m.totalUDPFlows, + m.payloadTooLarge, + m.retryFlowResponses, + m.migratedFlows, + ) + return m +} diff --git a/quic/v3/metrics_test.go b/quic/v3/metrics_test.go new file mode 100644 index 00000000..5f6b18a7 --- /dev/null +++ b/quic/v3/metrics_test.go @@ -0,0 +1,9 @@ +package v3_test + +type noopMetrics struct{} + +func (noopMetrics) IncrementFlows() {} +func (noopMetrics) DecrementFlows() {} +func (noopMetrics) PayloadTooLarge() {} +func (noopMetrics) RetryFlowResponse() {} +func (noopMetrics) MigrateFlow() {} diff --git a/quic/v3/muxer.go b/quic/v3/muxer.go new file mode 100644 index 00000000..ed688fea --- /dev/null +++ b/quic/v3/muxer.go @@ -0,0 +1,397 @@ +package v3 + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/rs/zerolog" + + "github.com/cloudflare/cloudflared/ingress" + "github.com/cloudflare/cloudflared/packet" +) + +const ( + // Allocating a 16 channel buffer here allows for the writer to be slightly faster than the reader. 
+ // This has worked previously well for datagramv2, so we will start with this as well + demuxChanCapacity = 16 + + logSrcKey = "src" + logDstKey = "dst" + logICMPTypeKey = "type" + logDurationKey = "durationMS" +) + +// DatagramConn is the bridge that multiplexes writes and reads of datagrams for UDP sessions and ICMP packets to +// a connection. +type DatagramConn interface { + DatagramUDPWriter + DatagramICMPWriter + // Serve provides a server interface to process and handle incoming QUIC datagrams and demux their datagram v3 payloads. + Serve(context.Context) error + // ID indicates connection index identifier + ID() uint8 +} + +// DatagramUDPWriter provides the Muxer interface to create proper UDP Datagrams when sending over a connection. +type DatagramUDPWriter interface { + SendUDPSessionDatagram(datagram []byte) error + SendUDPSessionResponse(id RequestID, resp SessionRegistrationResp) error +} + +// DatagramICMPWriter provides the Muxer interface to create ICMP Datagrams when sending over a connection. +type DatagramICMPWriter interface { + SendICMPPacket(icmp *packet.ICMP) error + SendICMPTTLExceed(icmp *packet.ICMP, rawPacket packet.RawPacket) error +} + +// QuicConnection provides an interface that matches [quic.Connection] for only the datagram operations. +// +// We currently rely on the mutex for the [quic.Connection.SendDatagram] and [quic.Connection.ReceiveDatagram] and +// do not have any locking for them. If the implementation in quic-go were to ever change, we would need to make +// sure that we lock properly on these operations. 
+type QuicConnection interface { + Context() context.Context + SendDatagram(payload []byte) error + ReceiveDatagram(context.Context) ([]byte, error) +} + +type datagramConn struct { + conn QuicConnection + index uint8 + sessionManager SessionManager + icmpRouter ingress.ICMPRouter + metrics Metrics + logger *zerolog.Logger + + datagrams chan []byte + readErrors chan error + + icmpEncoderPool sync.Pool // a pool of *packet.Encoder + icmpDecoder *packet.ICMPDecoder +} + +func NewDatagramConn(conn QuicConnection, sessionManager SessionManager, icmpRouter ingress.ICMPRouter, index uint8, metrics Metrics, logger *zerolog.Logger) DatagramConn { + log := logger.With().Uint8("datagramVersion", 3).Logger() + return &datagramConn{ + conn: conn, + index: index, + sessionManager: sessionManager, + icmpRouter: icmpRouter, + metrics: metrics, + logger: &log, + datagrams: make(chan []byte, demuxChanCapacity), + readErrors: make(chan error, 2), + icmpEncoderPool: sync.Pool{ + New: func() any { + return packet.NewEncoder() + }, + }, + icmpDecoder: packet.NewICMPDecoder(), + } +} + +func (c *datagramConn) ID() uint8 { + return c.index +} + +func (c *datagramConn) SendUDPSessionDatagram(datagram []byte) error { + return c.conn.SendDatagram(datagram) +} + +func (c *datagramConn) SendUDPSessionResponse(id RequestID, resp SessionRegistrationResp) error { + datagram := UDPSessionRegistrationResponseDatagram{ + RequestID: id, + ResponseType: resp, + } + data, err := datagram.MarshalBinary() + if err != nil { + return err + } + return c.conn.SendDatagram(data) +} + +func (c *datagramConn) SendICMPPacket(icmp *packet.ICMP) error { + cachedEncoder := c.icmpEncoderPool.Get() + // The encoded packet is a slice to a buffer owned by the encoder, so we shouldn't return the encoder back to the + // pool until the encoded packet is sent. 
+ defer c.icmpEncoderPool.Put(cachedEncoder) + encoder, ok := cachedEncoder.(*packet.Encoder) + if !ok { + return fmt.Errorf("encoderPool returned %T, expect *packet.Encoder", cachedEncoder) + } + payload, err := encoder.Encode(icmp) + if err != nil { + return err + } + icmpDatagram := ICMPDatagram{ + Payload: payload.Data, + } + datagram, err := icmpDatagram.MarshalBinary() + if err != nil { + return err + } + return c.conn.SendDatagram(datagram) +} + +func (c *datagramConn) SendICMPTTLExceed(icmp *packet.ICMP, rawPacket packet.RawPacket) error { + return c.SendICMPPacket(c.icmpRouter.ConvertToTTLExceeded(icmp, rawPacket)) +} + +var errReadTimeout error = errors.New("receive datagram timeout") + +// pollDatagrams will read datagrams from the underlying connection until the provided context is done. +func (c *datagramConn) pollDatagrams(ctx context.Context) { + for ctx.Err() == nil { + datagram, err := c.conn.ReceiveDatagram(ctx) + // If the read returns an error, we want to return the failure to the channel. + if err != nil { + c.readErrors <- err + return + } + c.datagrams <- datagram + } + if ctx.Err() != nil { + c.readErrors <- ctx.Err() + } +} + +// Serve will begin the process of receiving datagrams from the [quic.Connection] and demuxing them to their destination. +// The [DatagramConn] when serving, will be responsible for the sessions it accepts. +func (c *datagramConn) Serve(ctx context.Context) error { + connCtx := c.conn.Context() + // We want to make sure that we cancel the reader context if the Serve method returns. This could also mean that the + // underlying connection is also closing, but that is handled outside of the context of the datagram muxer. + readCtx, cancel := context.WithCancel(connCtx) + defer cancel() + go c.pollDatagrams(readCtx) + for { + // We make sure to monitor the context of cloudflared and the underlying connection to return if any errors occur. 
+ var datagram []byte + select { + // Monitor the context of cloudflared + case <-ctx.Done(): + return ctx.Err() + // Monitor the context of the underlying connection + case <-connCtx.Done(): + return connCtx.Err() + // Monitor for any hard errors from reading the connection + case err := <-c.readErrors: + return err + // Otherwise, wait and dequeue datagrams as they come in + case d := <-c.datagrams: + datagram = d + } + + // Each incoming datagram will be processed in a new go routine to handle the demuxing and action associated. + go func() { + typ, err := ParseDatagramType(datagram) + if err != nil { + c.logger.Err(err).Msgf("unable to parse datagram type: %d", typ) + return + } + switch typ { + case UDPSessionRegistrationType: + reg := &UDPSessionRegistrationDatagram{} + err := reg.UnmarshalBinary(datagram) + if err != nil { + c.logger.Err(err).Msgf("unable to unmarshal session registration datagram") + return + } + logger := c.logger.With().Str(logFlowID, reg.RequestID.String()).Logger() + // We bind the new session to the quic connection context instead of cloudflared context to allow for the + // quic connection to close and close only the sessions bound to it. Closing of cloudflared will also + // initiate the close of the quic connection, so we don't have to worry about the application context + // in the scope of a session. 
+ c.handleSessionRegistrationDatagram(connCtx, reg, &logger) + case UDPSessionPayloadType: + payload := &UDPSessionPayloadDatagram{} + err := payload.UnmarshalBinary(datagram) + if err != nil { + c.logger.Err(err).Msgf("unable to unmarshal session payload datagram") + return + } + logger := c.logger.With().Str(logFlowID, payload.RequestID.String()).Logger() + c.handleSessionPayloadDatagram(payload, &logger) + case ICMPType: + packet := &ICMPDatagram{} + err := packet.UnmarshalBinary(datagram) + if err != nil { + c.logger.Err(err).Msgf("unable to unmarshal icmp datagram") + return + } + c.handleICMPPacket(packet) + case UDPSessionRegistrationResponseType: + // cloudflared should never expect to receive UDP session responses as it will not initiate new + // sessions towards the edge. + c.logger.Error().Msgf("unexpected datagram type received: %d", UDPSessionRegistrationResponseType) + return + default: + c.logger.Error().Msgf("unknown datagram type received: %d", typ) + } + }() + } +} + +// This method handles new registrations of a session and the serve loop for the session. +func (c *datagramConn) handleSessionRegistrationDatagram(ctx context.Context, datagram *UDPSessionRegistrationDatagram, logger *zerolog.Logger) { + log := logger.With(). + Str(logFlowID, datagram.RequestID.String()). + Str(logDstKey, datagram.Dest.String()). 
+ Logger() + session, err := c.sessionManager.RegisterSession(datagram, c) + switch err { + case nil: + // Continue as normal + case ErrSessionAlreadyRegistered: + // Session is already registered and likely the response got lost + c.handleSessionAlreadyRegistered(datagram.RequestID, &log) + return + case ErrSessionBoundToOtherConn: + // Session is already registered but to a different connection + c.handleSessionMigration(datagram.RequestID, &log) + return + default: + log.Err(err).Msgf("flow registration failure") + c.handleSessionRegistrationFailure(datagram.RequestID, &log) + return + } + log = log.With().Str(logSrcKey, session.LocalAddr().String()).Logger() + c.metrics.IncrementFlows() + // Make sure to eventually remove the session from the session manager when the session is closed + defer c.sessionManager.UnregisterSession(session.ID()) + defer c.metrics.DecrementFlows() + + // Respond that we are able to process the new session + err = c.SendUDPSessionResponse(datagram.RequestID, ResponseOk) + if err != nil { + log.Err(err).Msgf("flow registration failure: unable to send session registration response") + return + } + + // We bind the context of the session to the [quic.Connection] that initiated the session. + // [Session.Serve] is blocking and will continue this go routine till the end of the session lifetime. + start := time.Now() + err = session.Serve(ctx) + elapsedMS := time.Now().Sub(start).Milliseconds() + log = log.With().Int64(logDurationKey, elapsedMS).Logger() + if err == nil { + // We typically don't expect a session to close without some error response. [SessionIdleErr] is the typical + // expected error response. + log.Warn().Msg("flow closed: no explicit close or timeout elapsed") + return + } + // SessionIdleErr and SessionCloseErr are valid and successful error responses to end a session. 
+ if errors.Is(err, SessionIdleErr{}) || errors.Is(err, SessionCloseErr) { + log.Debug().Msgf("flow closed: %s", err.Error()) + return + } + + // All other errors should be reported as errors + log.Err(err).Msgf("flow closed with an error") +} + +func (c *datagramConn) handleSessionAlreadyRegistered(requestID RequestID, logger *zerolog.Logger) { + // Send another registration response since the session is already active + err := c.SendUDPSessionResponse(requestID, ResponseOk) + if err != nil { + logger.Err(err).Msgf("flow registration failure: unable to send an additional flow registration response") + return + } + + session, err := c.sessionManager.GetSession(requestID) + if err != nil { + // If for some reason we can not find the session after attempting to register it, we can just return + // instead of trying to reset the idle timer for it. + return + } + // The session is already running in another routine so we want to restart the idle timeout since no proxied + // packets have come down yet. + session.ResetIdleTimer() + c.metrics.RetryFlowResponse() + logger.Debug().Msgf("flow registration response retry") +} + +func (c *datagramConn) handleSessionMigration(requestID RequestID, logger *zerolog.Logger) { + // We need to migrate the currently running session to this edge connection. + session, err := c.sessionManager.GetSession(requestID) + if err != nil { + // If for some reason we can not find the session after attempting to register it, we can just return + // instead of trying to reset the idle timer for it. + return + } + + // Migrate the session to use this edge connection instead of the currently running one. + // We also pass in this connection's logger to override the existing logger for the session. 
+ session.Migrate(c, c.conn.Context(), c.logger) + + // Send another registration response since the session is already active + err = c.SendUDPSessionResponse(requestID, ResponseOk) + if err != nil { + logger.Err(err).Msgf("flow registration failure: unable to send an additional flow registration response") + return + } + logger.Debug().Msgf("flow registration migration") +} + +func (c *datagramConn) handleSessionRegistrationFailure(requestID RequestID, logger *zerolog.Logger) { + err := c.SendUDPSessionResponse(requestID, ResponseUnableToBindSocket) + if err != nil { + logger.Err(err).Msgf("unable to send flow registration error response (%d)", ResponseUnableToBindSocket) + } +} + +// Handles incoming datagrams that need to be sent to a registered session. +func (c *datagramConn) handleSessionPayloadDatagram(datagram *UDPSessionPayloadDatagram, logger *zerolog.Logger) { + s, err := c.sessionManager.GetSession(datagram.RequestID) + if err != nil { + logger.Err(err).Msgf("unable to find flow") + return + } + // We ignore the bytes written to the socket because any partial write must return an error. + _, err = s.Write(datagram.Payload) + if err != nil { + logger.Err(err).Msgf("unable to write payload for the flow") + return + } +} + +// Handles incoming ICMP datagrams. 
+func (c *datagramConn) handleICMPPacket(datagram *ICMPDatagram) { + if c.icmpRouter == nil { + // ICMPRouter is disabled so we drop the current packet and ignore all incoming ICMP packets + return + } + + // Decode the provided ICMPDatagram as an ICMP packet + rawPacket := packet.RawPacket{Data: datagram.Payload} + icmp, err := c.icmpDecoder.Decode(rawPacket) + if err != nil { + c.logger.Err(err).Msgf("unable to marshal icmp packet") + return + } + + // If the ICMP packet's TTL is expired, we won't send it to the origin and immediately return a TTL Exceeded Message + if icmp.TTL <= 1 { + if err := c.SendICMPTTLExceed(icmp, rawPacket); err != nil { + c.logger.Err(err).Msg("failed to return ICMP TTL exceed error") + } + return + } + icmp.TTL-- + + // The context isn't really needed here since it's only really used throughout the ICMP router as a way to store + // the tracing context, however datagram V3 does not support tracing ICMP packets, so we just pass the current + // connection context which will have no tracing information available. + err = c.icmpRouter.Request(c.conn.Context(), icmp, newPacketResponder(c, c.index)) + if err != nil { + c.logger.Err(err). + Str(logSrcKey, icmp.Src.String()). + Str(logDstKey, icmp.Dst.String()). + Interface(logICMPTypeKey, icmp.Type). 
+ Msgf("unable to write icmp datagram to origin") + return + } +} diff --git a/quic/v3/muxer_test.go b/quic/v3/muxer_test.go new file mode 100644 index 00000000..7b532ba3 --- /dev/null +++ b/quic/v3/muxer_test.go @@ -0,0 +1,782 @@ +package v3_test + +import ( + "bytes" + "context" + "errors" + "net" + "net/netip" + "slices" + "sync" + "testing" + "time" + + "github.com/google/gopacket/layers" + "github.com/rs/zerolog" + "golang.org/x/net/icmp" + "golang.org/x/net/ipv4" + + "github.com/cloudflare/cloudflared/ingress" + "github.com/cloudflare/cloudflared/packet" + v3 "github.com/cloudflare/cloudflared/quic/v3" +) + +type noopEyeball struct { + connID uint8 +} + +func (noopEyeball) Serve(ctx context.Context) error { return nil } +func (n noopEyeball) ID() uint8 { return n.connID } +func (noopEyeball) SendUDPSessionDatagram(datagram []byte) error { return nil } +func (noopEyeball) SendUDPSessionResponse(id v3.RequestID, resp v3.SessionRegistrationResp) error { + return nil +} +func (noopEyeball) SendICMPPacket(icmp *packet.ICMP) error { return nil } +func (noopEyeball) SendICMPTTLExceed(icmp *packet.ICMP, rawPacket packet.RawPacket) error { return nil } + +type mockEyeball struct { + connID uint8 + // datagram sent via SendUDPSessionDatagram + recvData chan []byte + // responses sent via SendUDPSessionResponse + recvResp chan struct { + id v3.RequestID + resp v3.SessionRegistrationResp + } +} + +func newMockEyeball() mockEyeball { + return mockEyeball{ + connID: 0, + recvData: make(chan []byte, 1), + recvResp: make(chan struct { + id v3.RequestID + resp v3.SessionRegistrationResp + }, 1), + } +} + +func (mockEyeball) Serve(ctx context.Context) error { return nil } +func (m *mockEyeball) ID() uint8 { return m.connID } + +func (m *mockEyeball) SendUDPSessionDatagram(datagram []byte) error { + b := make([]byte, len(datagram)) + copy(b, datagram) + m.recvData <- b + return nil +} + +func (m *mockEyeball) SendUDPSessionResponse(id v3.RequestID, resp 
v3.SessionRegistrationResp) error { + m.recvResp <- struct { + id v3.RequestID + resp v3.SessionRegistrationResp + }{ + id, resp, + } + return nil +} + +func (m *mockEyeball) SendICMPPacket(icmp *packet.ICMP) error { return nil } +func (m *mockEyeball) SendICMPTTLExceed(icmp *packet.ICMP, rawPacket packet.RawPacket) error { + return nil +} + +func TestDatagramConn_New(t *testing.T) { + log := zerolog.Nop() + conn := v3.NewDatagramConn(newMockQuicConn(), v3.NewSessionManager(&noopMetrics{}, &log, ingress.DialUDPAddrPort), &noopICMPRouter{}, 0, &noopMetrics{}, &log) + if conn == nil { + t.Fatal("expected valid connection") + } +} + +func TestDatagramConn_SendUDPSessionDatagram(t *testing.T) { + log := zerolog.Nop() + quic := newMockQuicConn() + conn := v3.NewDatagramConn(quic, v3.NewSessionManager(&noopMetrics{}, &log, ingress.DialUDPAddrPort), &noopICMPRouter{}, 0, &noopMetrics{}, &log) + + payload := []byte{0xef, 0xef} + conn.SendUDPSessionDatagram(payload) + p := <-quic.recv + if !slices.Equal(p, payload) { + t.Fatal("datagram sent does not match datagram received on quic side") + } +} + +func TestDatagramConn_SendUDPSessionResponse(t *testing.T) { + log := zerolog.Nop() + quic := newMockQuicConn() + conn := v3.NewDatagramConn(quic, v3.NewSessionManager(&noopMetrics{}, &log, ingress.DialUDPAddrPort), &noopICMPRouter{}, 0, &noopMetrics{}, &log) + + conn.SendUDPSessionResponse(testRequestID, v3.ResponseDestinationUnreachable) + resp := <-quic.recv + var response v3.UDPSessionRegistrationResponseDatagram + err := response.UnmarshalBinary(resp) + if err != nil { + t.Fatal(err) + } + expected := v3.UDPSessionRegistrationResponseDatagram{ + RequestID: testRequestID, + ResponseType: v3.ResponseDestinationUnreachable, + } + if response != expected { + t.Fatal("datagram response sent does not match expected datagram response received") + } +} + +func TestDatagramConnServe_ApplicationClosed(t *testing.T) { + log := zerolog.Nop() + quic := newMockQuicConn() + conn := 
v3.NewDatagramConn(quic, v3.NewSessionManager(&noopMetrics{}, &log, ingress.DialUDPAddrPort), &noopICMPRouter{}, 0, &noopMetrics{}, &log) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + err := conn.Serve(ctx) + if !errors.Is(err, context.DeadlineExceeded) { + t.Fatal(err) + } +} + +func TestDatagramConnServe_ConnectionClosed(t *testing.T) { + log := zerolog.Nop() + quic := newMockQuicConn() + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + quic.ctx = ctx + conn := v3.NewDatagramConn(quic, v3.NewSessionManager(&noopMetrics{}, &log, ingress.DialUDPAddrPort), &noopICMPRouter{}, 0, &noopMetrics{}, &log) + + err := conn.Serve(context.Background()) + if !errors.Is(err, context.DeadlineExceeded) { + t.Fatal(err) + } +} + +func TestDatagramConnServe_ReceiveDatagramError(t *testing.T) { + log := zerolog.Nop() + quic := &mockQuicConnReadError{err: net.ErrClosed} + conn := v3.NewDatagramConn(quic, v3.NewSessionManager(&noopMetrics{}, &log, ingress.DialUDPAddrPort), &noopICMPRouter{}, 0, &noopMetrics{}, &log) + + err := conn.Serve(context.Background()) + if !errors.Is(err, net.ErrClosed) { + t.Fatal(err) + } +} + +func TestDatagramConnServe_ErrorDatagramTypes(t *testing.T) { + for _, test := range []struct { + name string + input []byte + expected string + }{ + { + "empty", + []byte{}, + "{\"level\":\"error\",\"datagramVersion\":3,\"error\":\"datagram should have at least 1 byte\",\"message\":\"unable to parse datagram type: 0\"}\n", + }, + { + "unexpected", + []byte{byte(v3.UDPSessionRegistrationResponseType)}, + "{\"level\":\"error\",\"datagramVersion\":3,\"message\":\"unexpected datagram type received: 3\"}\n", + }, + { + "unknown", + []byte{99}, + "{\"level\":\"error\",\"datagramVersion\":3,\"message\":\"unknown datagram type received: 99\"}\n", + }, + } { + t.Run(test.name, func(t *testing.T) { + logOutput := new(LockedBuffer) + log := zerolog.New(logOutput) + quic := 
newMockQuicConn() + quic.send <- test.input + conn := v3.NewDatagramConn(quic, &mockSessionManager{}, &noopICMPRouter{}, 0, &noopMetrics{}, &log) + + ctx, cancel := context.WithTimeout(context.Background(), 1*time.Second) + defer cancel() + err := conn.Serve(ctx) + // we cancel the Serve method to check to see if the log output was written since the unsupported datagram + // is dropped with only a log message as a side-effect. + if !errors.Is(err, context.DeadlineExceeded) { + t.Fatal(err) + } + + out := logOutput.String() + if out != test.expected { + t.Fatalf("incorrect log output expected: %s", out) + } + }) + } +} + +type LockedBuffer struct { + bytes.Buffer + l sync.Mutex +} + +func (b *LockedBuffer) Write(p []byte) (n int, err error) { + b.l.Lock() + defer b.l.Unlock() + return b.Buffer.Write(p) +} + +func (b *LockedBuffer) String() string { + b.l.Lock() + defer b.l.Unlock() + return b.Buffer.String() +} + +func TestDatagramConnServe_RegisterSession_SessionManagerError(t *testing.T) { + log := zerolog.Nop() + quic := newMockQuicConn() + expectedErr := errors.New("unable to register session") + sessionManager := mockSessionManager{expectedRegErr: expectedErr} + conn := v3.NewDatagramConn(quic, &sessionManager, &noopICMPRouter{}, 0, &noopMetrics{}, &log) + + // Setup the muxer + ctx, cancel := context.WithCancelCause(context.Background()) + defer cancel(errors.New("other error")) + done := make(chan error, 1) + go func() { + done <- conn.Serve(ctx) + }() + + // Send new session registration + datagram := newRegisterSessionDatagram(testRequestID) + quic.send <- datagram + + // Wait for session registration response with failure + datagram = <-quic.recv + var resp v3.UDPSessionRegistrationResponseDatagram + err := resp.UnmarshalBinary(datagram) + if err != nil { + t.Fatal(err) + } + + if resp.RequestID != testRequestID || resp.ResponseType != v3.ResponseUnableToBindSocket { + t.Fatalf("expected registration response failure") + } + + // Cancel the muxer Serve 
context and make sure it closes with the expected error + assertContextClosed(t, ctx, done, cancel) +} + +func TestDatagramConnServe(t *testing.T) { + log := zerolog.Nop() + quic := newMockQuicConn() + session := newMockSession() + sessionManager := mockSessionManager{session: &session} + conn := v3.NewDatagramConn(quic, &sessionManager, &noopICMPRouter{}, 0, &noopMetrics{}, &log) + + // Setup the muxer + ctx, cancel := context.WithCancelCause(context.Background()) + defer cancel(errors.New("other error")) + done := make(chan error, 1) + go func() { + done <- conn.Serve(ctx) + }() + + // Send new session registration + datagram := newRegisterSessionDatagram(testRequestID) + quic.send <- datagram + + // Wait for session registration response with success + datagram = <-quic.recv + var resp v3.UDPSessionRegistrationResponseDatagram + err := resp.UnmarshalBinary(datagram) + if err != nil { + t.Fatal(err) + } + + if resp.RequestID != testRequestID || resp.ResponseType != v3.ResponseOk { + t.Fatalf("expected registration response ok") + } + + // We expect the session to be served + timer := time.NewTimer(15 * time.Second) + defer timer.Stop() + select { + case <-session.served: + break + case <-timer.C: + t.Fatalf("expected session serve to be called") + } + + // Cancel the muxer Serve context and make sure it closes with the expected error + assertContextClosed(t, ctx, done, cancel) +} + +func TestDatagramConnServe_RegisterTwice(t *testing.T) { + log := zerolog.Nop() + quic := newMockQuicConn() + session := newMockSession() + sessionManager := mockSessionManager{session: &session} + conn := v3.NewDatagramConn(quic, &sessionManager, &noopICMPRouter{}, 0, &noopMetrics{}, &log) + + // Setup the muxer + ctx, cancel := context.WithCancelCause(context.Background()) + defer cancel(errors.New("other error")) + done := make(chan error, 1) + go func() { + done <- conn.Serve(ctx) + }() + + // Send new session registration + datagram := newRegisterSessionDatagram(testRequestID) + 
quic.send <- datagram + + // Wait for session registration response with success + datagram = <-quic.recv + var resp v3.UDPSessionRegistrationResponseDatagram + err := resp.UnmarshalBinary(datagram) + if err != nil { + t.Fatal(err) + } + + if resp.RequestID != testRequestID || resp.ResponseType != v3.ResponseOk { + t.Fatalf("expected registration response ok") + } + + // Set the session manager to return already registered + sessionManager.expectedRegErr = v3.ErrSessionAlreadyRegistered + // Send the registration again as if we didn't receive it at the edge + datagram = newRegisterSessionDatagram(testRequestID) + quic.send <- datagram + + // Wait for session registration response with success + datagram = <-quic.recv + err = resp.UnmarshalBinary(datagram) + if err != nil { + t.Fatal(err) + } + + if resp.RequestID != testRequestID || resp.ResponseType != v3.ResponseOk { + t.Fatalf("expected registration response ok") + } + + // We expect the session to be served + timer := time.NewTimer(15 * time.Second) + defer timer.Stop() + select { + case <-session.served: + break + case <-timer.C: + t.Fatalf("expected session serve to be called") + } + + // Cancel the muxer Serve context and make sure it closes with the expected error + assertContextClosed(t, ctx, done, cancel) +} + +func TestDatagramConnServe_MigrateConnection(t *testing.T) { + log := zerolog.Nop() + quic := newMockQuicConn() + session := newMockSession() + sessionManager := mockSessionManager{session: &session} + conn := v3.NewDatagramConn(quic, &sessionManager, &noopICMPRouter{}, 0, &noopMetrics{}, &log) + quic2 := newMockQuicConn() + conn2 := v3.NewDatagramConn(quic2, &sessionManager, &noopICMPRouter{}, 1, &noopMetrics{}, &log) + + // Setup the muxer + ctx, cancel := context.WithCancelCause(context.Background()) + defer cancel(errors.New("other error")) + done := make(chan error, 1) + go func() { + done <- conn.Serve(ctx) + }() + + ctx2, cancel2 := context.WithCancelCause(context.Background()) + defer 
cancel2(errors.New("other error")) + done2 := make(chan error, 1) + go func() { + done2 <- conn2.Serve(ctx2) + }() + + // Send new session registration + datagram := newRegisterSessionDatagram(testRequestID) + quic.send <- datagram + + // Wait for session registration response with success + datagram = <-quic.recv + var resp v3.UDPSessionRegistrationResponseDatagram + err := resp.UnmarshalBinary(datagram) + if err != nil { + t.Fatal(err) + } + + if resp.RequestID != testRequestID || resp.ResponseType != v3.ResponseOk { + t.Fatalf("expected registration response ok") + } + + // Set the session manager to return already registered to another connection + sessionManager.expectedRegErr = v3.ErrSessionBoundToOtherConn + // Send the registration again as if we didn't receive it at the edge for a new connection + datagram = newRegisterSessionDatagram(testRequestID) + quic2.send <- datagram + + // Wait for session registration response with success + datagram = <-quic2.recv + err = resp.UnmarshalBinary(datagram) + if err != nil { + t.Fatal(err) + } + + if resp.RequestID != testRequestID || resp.ResponseType != v3.ResponseOk { + t.Fatalf("expected registration response ok") + } + + // We expect the session to be served + timer := time.NewTimer(15 * time.Second) + defer timer.Stop() + select { + case <-session.served: + break + case <-timer.C: + t.Fatalf("expected session serve to be called") + } + + // Expect session to be migrated + select { + case id := <-session.migrated: + if id != conn2.ID() { + t.Fatalf("expected session to be migrated to connection 2") + } + case <-timer.C: + t.Fatalf("expected session migration to be called") + } + + // Cancel the muxer Serve context and make sure it closes with the expected error + assertContextClosed(t, ctx, done, cancel) + // Cancel the second muxer Serve context and make sure it closes with the expected error + assertContextClosed(t, ctx2, done2, cancel2) +} + +func TestDatagramConnServe_Payload_GetSessionError(t *testing.T) { + 
log := zerolog.Nop() + quic := newMockQuicConn() + // mockSessionManager will return the ErrSessionNotFound for any session attempting to be queried by the muxer + sessionManager := mockSessionManager{session: nil, expectedGetErr: v3.ErrSessionNotFound} + conn := v3.NewDatagramConn(quic, &sessionManager, &noopICMPRouter{}, 0, &noopMetrics{}, &log) + + // Setup the muxer + ctx, cancel := context.WithCancelCause(context.Background()) + defer cancel(errors.New("other error")) + done := make(chan error, 1) + go func() { + done <- conn.Serve(ctx) + }() + + // Send new session registration + datagram := newSessionPayloadDatagram(testRequestID, []byte{0xef, 0xef}) + quic.send <- datagram + + // Since the muxer should eventually discard a failed registration request, there is no side-effect + // that the registration was failed beyond the muxer accepting the registration request. As such, the + // test can only ensure that the quic.send channel was consumed and that the muxer closes normally + // afterwards with the expected context cancelled trigger. 
+ + // Cancel the muxer Serve context and make sure it closes with the expected error + assertContextClosed(t, ctx, done, cancel) +} + +func TestDatagramConnServe_Payload(t *testing.T) { + log := zerolog.Nop() + quic := newMockQuicConn() + session := newMockSession() + sessionManager := mockSessionManager{session: &session} + conn := v3.NewDatagramConn(quic, &sessionManager, &noopICMPRouter{}, 0, &noopMetrics{}, &log) + + // Setup the muxer + ctx, cancel := context.WithCancelCause(context.Background()) + defer cancel(errors.New("other error")) + done := make(chan error, 1) + go func() { + done <- conn.Serve(ctx) + }() + + // Send new session registration + expectedPayload := []byte{0xef, 0xef} + datagram := newSessionPayloadDatagram(testRequestID, expectedPayload) + quic.send <- datagram + + // Session should receive the payload + payload := <-session.recv + if !slices.Equal(expectedPayload, payload) { + t.Fatalf("expected session receieve the payload sent via the muxer") + } + + // Cancel the muxer Serve context and make sure it closes with the expected error + assertContextClosed(t, ctx, done, cancel) +} + +func TestDatagramConnServe_ICMPDatagram_TTLDecremented(t *testing.T) { + log := zerolog.Nop() + quic := newMockQuicConn() + router := newMockICMPRouter() + conn := v3.NewDatagramConn(quic, &mockSessionManager{}, router, 0, &noopMetrics{}, &log) + + // Setup the muxer + ctx, cancel := context.WithCancelCause(context.Background()) + defer cancel(errors.New("other error")) + done := make(chan error, 1) + go func() { + done <- conn.Serve(ctx) + }() + + // Send new ICMP Echo request + expectedICMP := &packet.ICMP{ + IP: &packet.IP{ + Src: netip.MustParseAddr("192.168.1.1"), + Dst: netip.MustParseAddr("10.0.0.1"), + Protocol: layers.IPProtocolICMPv4, + TTL: 20, + }, + Message: &icmp.Message{ + Type: ipv4.ICMPTypeEcho, + Code: 0, + Body: &icmp.Echo{ + ID: 25821, + Seq: 58129, + Data: []byte("test ttl=0"), + }, + }, + } + datagram := newICMPDatagram(expectedICMP) + 
quic.send <- datagram + + // Router should receive the packet + actualICMP := <-router.recv + assertICMPEqual(t, expectedICMP, actualICMP) + if expectedICMP.TTL-1 != actualICMP.TTL { + t.Fatalf("TTL should be decremented by one before sending to origin: %d, %d", expectedICMP.TTL, actualICMP.TTL) + } + + // Cancel the muxer Serve context and make sure it closes with the expected error + assertContextClosed(t, ctx, done, cancel) +} + +func TestDatagramConnServe_ICMPDatagram_TTLExceeded(t *testing.T) { + log := zerolog.Nop() + quic := newMockQuicConn() + router := newMockICMPRouter() + conn := v3.NewDatagramConn(quic, &mockSessionManager{}, router, 0, &noopMetrics{}, &log) + + // Setup the muxer + ctx, cancel := context.WithCancelCause(context.Background()) + defer cancel(errors.New("other error")) + done := make(chan error, 1) + go func() { + done <- conn.Serve(ctx) + }() + + // Send new ICMP Echo request + expectedICMP := &packet.ICMP{ + IP: &packet.IP{ + Src: netip.MustParseAddr("192.168.1.1"), + Dst: netip.MustParseAddr("10.0.0.1"), + Protocol: layers.IPProtocolICMPv4, + TTL: 0, + }, + Message: &icmp.Message{ + Type: ipv4.ICMPTypeEcho, + Code: 0, + Body: &icmp.Echo{ + ID: 25821, + Seq: 58129, + Data: []byte("test ttl=0"), + }, + }, + } + datagram := newICMPDatagram(expectedICMP) + quic.send <- datagram + + // Origin should not recieve a packet + select { + case <-router.recv: + t.Fatalf("TTL should be expired and no origin ICMP sent") + default: + } + + // Eyeball should receive the packet + datagram = <-quic.recv + icmpDatagram := v3.ICMPDatagram{} + err := icmpDatagram.UnmarshalBinary(datagram) + if err != nil { + t.Fatal(err) + } + decoder := packet.NewICMPDecoder() + ttlExpiredICMP, err := decoder.Decode(packet.RawPacket{Data: icmpDatagram.Payload}) + if err != nil { + t.Fatal(err) + } + + // Packet should be a TTL Exceeded ICMP + if ttlExpiredICMP.TTL != packet.DefaultTTL || ttlExpiredICMP.Message.Type != ipv4.ICMPTypeTimeExceeded { + t.Fatalf("ICMP packet 
should be a ICMP Exceeded: %+v", ttlExpiredICMP) + } + + // Cancel the muxer Serve context and make sure it closes with the expected error + assertContextClosed(t, ctx, done, cancel) +} + +func newRegisterSessionDatagram(id v3.RequestID) []byte { + datagram := v3.UDPSessionRegistrationDatagram{ + RequestID: id, + Dest: netip.MustParseAddrPort("127.0.0.1:8080"), + IdleDurationHint: 5 * time.Second, + } + payload, err := datagram.MarshalBinary() + if err != nil { + panic(err) + } + return payload +} + +func newRegisterResponseSessionDatagram(id v3.RequestID, resp v3.SessionRegistrationResp) []byte { + datagram := v3.UDPSessionRegistrationResponseDatagram{ + RequestID: id, + ResponseType: resp, + } + payload, err := datagram.MarshalBinary() + if err != nil { + panic(err) + } + return payload +} + +func newSessionPayloadDatagram(id v3.RequestID, payload []byte) []byte { + datagram := make([]byte, len(payload)+17) + err := v3.MarshalPayloadHeaderTo(id, datagram[:]) + if err != nil { + panic(err) + } + copy(datagram[17:], payload) + return datagram +} + +func newICMPDatagram(pk *packet.ICMP) []byte { + encoder := packet.NewEncoder() + rawPacket, err := encoder.Encode(pk) + if err != nil { + panic(err) + } + datagram := v3.ICMPDatagram{ + Payload: rawPacket.Data, + } + payload, err := datagram.MarshalBinary() + if err != nil { + panic(err) + } + return payload +} + +// Cancel the provided context and make sure it closes with the expected cancellation error +func assertContextClosed(t *testing.T, ctx context.Context, done <-chan error, cancel context.CancelCauseFunc) { + cancel(expectedContextCanceled) + err := <-done + if !errors.Is(err, context.Canceled) { + t.Fatal(err) + } + if !errors.Is(context.Cause(ctx), expectedContextCanceled) { + t.Fatal(err) + } +} + +type mockQuicConn struct { + ctx context.Context + send chan []byte + recv chan []byte +} + +func newMockQuicConn() *mockQuicConn { + return &mockQuicConn{ + ctx: context.Background(), + send: make(chan []byte, 
1), + recv: make(chan []byte, 1), + } +} + +func (m *mockQuicConn) Context() context.Context { + return m.ctx +} + +func (m *mockQuicConn) SendDatagram(payload []byte) error { + b := make([]byte, len(payload)) + copy(b, payload) + m.recv <- b + return nil +} + +func (m *mockQuicConn) ReceiveDatagram(_ context.Context) ([]byte, error) { + return <-m.send, nil +} + +type mockQuicConnReadError struct { + err error +} + +func (m *mockQuicConnReadError) Context() context.Context { + return context.Background() +} + +func (m *mockQuicConnReadError) SendDatagram(payload []byte) error { + return nil +} + +func (m *mockQuicConnReadError) ReceiveDatagram(_ context.Context) ([]byte, error) { + return nil, m.err +} + +type mockSessionManager struct { + session v3.Session + + expectedRegErr error + expectedGetErr error +} + +func (m *mockSessionManager) RegisterSession(request *v3.UDPSessionRegistrationDatagram, conn v3.DatagramConn) (v3.Session, error) { + return m.session, m.expectedRegErr +} + +func (m *mockSessionManager) GetSession(requestID v3.RequestID) (v3.Session, error) { + return m.session, m.expectedGetErr +} + +func (m *mockSessionManager) UnregisterSession(requestID v3.RequestID) {} + +type mockSession struct { + served chan struct{} + migrated chan uint8 + recv chan []byte +} + +func newMockSession() mockSession { + return mockSession{ + served: make(chan struct{}), + migrated: make(chan uint8, 2), + recv: make(chan []byte, 1), + } +} + +func (m *mockSession) ID() v3.RequestID { return testRequestID } +func (m *mockSession) RemoteAddr() net.Addr { return testOriginAddr } +func (m *mockSession) LocalAddr() net.Addr { return testLocalAddr } +func (m *mockSession) ConnectionID() uint8 { return 0 } +func (m *mockSession) Migrate(conn v3.DatagramConn, ctx context.Context, log *zerolog.Logger) { + m.migrated <- conn.ID() +} +func (m *mockSession) ResetIdleTimer() {} + +func (m *mockSession) Serve(ctx context.Context) error { + close(m.served) + return 
v3.SessionCloseErr +} + +func (m *mockSession) Write(payload []byte) (n int, err error) { + b := make([]byte, len(payload)) + copy(b, payload) + m.recv <- b + return len(b), nil +} + +func (m *mockSession) Close() error { + return nil +} diff --git a/quic/v3/request.go b/quic/v3/request.go new file mode 100644 index 00000000..b716605d --- /dev/null +++ b/quic/v3/request.go @@ -0,0 +1,89 @@ +package v3 + +import ( + "encoding/binary" + "errors" + "fmt" +) + +const ( + datagramRequestIdLen = 16 +) + +var ( + // ErrInvalidRequestIDLen is returned when the provided request id can not be parsed from the provided byte slice. + ErrInvalidRequestIDLen error = errors.New("invalid request id length provided") + // ErrInvalidPayloadDestLen is returned when the provided destination byte slice cannot fit the whole request id. + ErrInvalidPayloadDestLen error = errors.New("invalid payload size provided") +) + +// RequestID is the request-id-v2 identifier, it is used to distinguish between specific flows or sessions proxied +// from the edge to cloudflared. +type RequestID uint128 + +type uint128 struct { + hi uint64 + lo uint64 +} + +// RequestIDFromSlice reads a request ID from a byte slice. +func RequestIDFromSlice(data []byte) (RequestID, error) { + if len(data) != datagramRequestIdLen { + return RequestID{}, ErrInvalidRequestIDLen + } + + return RequestID{ + hi: binary.BigEndian.Uint64(data[:8]), + lo: binary.BigEndian.Uint64(data[8:]), + }, nil +} + +func (id RequestID) String() string { + return fmt.Sprintf("%016x%016x", id.hi, id.lo) +} + +// Compare returns an integer comparing two IPs. +// The result will be 0 if id == id2, -1 if id < id2, and +1 if id > id2. +// The definition of "less than" is the same as the [RequestID.Less] method. 
+func (id RequestID) Compare(id2 RequestID) int { + hi1, hi2 := id.hi, id2.hi + if hi1 < hi2 { + return -1 + } + if hi1 > hi2 { + return 1 + } + lo1, lo2 := id.lo, id2.lo + if lo1 < lo2 { + return -1 + } + if lo1 > lo2 { + return 1 + } + return 0 +} + +// Less reports whether id sorts before id2. +func (id RequestID) Less(id2 RequestID) bool { return id.Compare(id2) == -1 } + +// MarshalBinaryTo writes the id to the provided destination byte slice; the byte slice must be of at least size 16. +func (id RequestID) MarshalBinaryTo(data []byte) error { + if len(data) < datagramRequestIdLen { + return ErrInvalidPayloadDestLen + } + binary.BigEndian.PutUint64(data[:8], id.hi) + binary.BigEndian.PutUint64(data[8:], id.lo) + return nil +} + +func (id *RequestID) UnmarshalBinary(data []byte) error { + if len(data) != 16 { + return fmt.Errorf("invalid length slice provided to unmarshal: %d (expected 16)", len(data)) + } + + *id = RequestID{ + binary.BigEndian.Uint64(data[:8]), + binary.BigEndian.Uint64(data[8:]), + } + return nil +} diff --git a/quic/v3/request_test.go b/quic/v3/request_test.go new file mode 100644 index 00000000..3c7915c7 --- /dev/null +++ b/quic/v3/request_test.go @@ -0,0 +1,64 @@ +package v3_test + +import ( + "crypto/rand" + "slices" + "testing" + + "github.com/stretchr/testify/require" + + v3 "github.com/cloudflare/cloudflared/quic/v3" +) + +var ( + testRequestIDBytes = [16]byte{ + 0x00, 0x11, 0x22, 0x33, + 0x44, 0x55, 0x66, 0x77, + 0x88, 0x99, 0xaa, 0xbb, + 0xcc, 0xdd, 0xee, 0xff, + } + testRequestID = mustRequestID(testRequestIDBytes) +) + +func mustRequestID(data [16]byte) v3.RequestID { + id, err := v3.RequestIDFromSlice(data[:]) + if err != nil { + panic(err) + } + return id +} + +func TestRequestIDParsing(t *testing.T) { + buf1 := make([]byte, 16) + n, err := rand.Read(buf1) + if err != nil { + t.Fatal(err) + } + if n != 16 { + t.Fatalf("did not read 16 bytes: %d", n) + } + id, err := v3.RequestIDFromSlice(buf1) + if err != nil { + t.Fatal(err) + 
} + buf2 := make([]byte, 16) + err = id.MarshalBinaryTo(buf2) + if err != nil { + t.Fatal(err) + } + if !slices.Equal(buf1, buf2) { + t.Fatalf("buf1 != buf2: %+v %+v", buf1, buf2) + } +} + +func TestRequestID_MarshalBinary(t *testing.T) { + buf := make([]byte, 16) + err := testRequestID.MarshalBinaryTo(buf) + require.NoError(t, err) + require.Len(t, buf, 16) + + parsed := v3.RequestID{} + err = parsed.UnmarshalBinary(buf) + require.NoError(t, err) + require.Equal(t, testRequestID, parsed) +} diff --git a/quic/v3/session.go b/quic/v3/session.go new file mode 100644 index 00000000..6836aed9 --- /dev/null +++ b/quic/v3/session.go @@ -0,0 +1,277 @@ +package v3 + +import ( + "context" + "errors" + "fmt" + "io" + "net" + "sync" + "sync/atomic" + "time" + + "github.com/rs/zerolog" +) + +const ( + // A default is provided in the case that the client does not provide a close idle timeout. + defaultCloseIdleAfter = 210 * time.Second + + // The maximum payload from the origin that we will be able to read. However, even though we will + // read 1500 bytes from the origin, we limit the amount of bytes to be proxied to less than + // this value (maxDatagramPayloadLen). + maxOriginUDPPacketSize = 1500 + + logFlowID = "flowID" + logPacketSizeKey = "packetSize" +) + +// SessionCloseErr indicates that the session's Close method was called. +var SessionCloseErr error = errors.New("flow was closed directly") + +// SessionIdleErr is returned when the session was closed because there was no communication +// in either direction over the session for the timeout period. 
+type SessionIdleErr struct { + timeout time.Duration +} + +func (e SessionIdleErr) Error() string { + return fmt.Sprintf("flow was idle for %v", e.timeout) +} + +func (e SessionIdleErr) Is(target error) bool { + _, ok := target.(SessionIdleErr) + return ok +} + +func newSessionIdleErr(timeout time.Duration) error { + return SessionIdleErr{timeout} +} + +type Session interface { + io.WriteCloser + ID() RequestID + ConnectionID() uint8 + RemoteAddr() net.Addr + LocalAddr() net.Addr + ResetIdleTimer() + Migrate(eyeball DatagramConn, ctx context.Context, logger *zerolog.Logger) + // Serve starts the event loop for processing UDP packets + Serve(ctx context.Context) error +} + +type session struct { + id RequestID + closeAfterIdle time.Duration + origin io.ReadWriteCloser + originAddr net.Addr + localAddr net.Addr + eyeball atomic.Pointer[DatagramConn] + // activeAtChan is used to communicate the last read/write time + activeAtChan chan time.Time + closeChan chan error + contextChan chan context.Context + metrics Metrics + log *zerolog.Logger + + // A special close function that we wrap with sync.Once to make sure it is only called once + closeFn func() error +} + +func NewSession( + id RequestID, + closeAfterIdle time.Duration, + origin io.ReadWriteCloser, + originAddr net.Addr, + localAddr net.Addr, + eyeball DatagramConn, + metrics Metrics, + log *zerolog.Logger, +) Session { + logger := log.With().Str(logFlowID, id.String()).Logger() + // closeChan has two slots to allow for both writers (the closeFn and the Serve routine) to both be able to + // write to the channel without blocking since there is only ever one value read from the closeChan by the + // waitForCloseCondition. + closeChan := make(chan error, 2) + session := &session{ + id: id, + closeAfterIdle: closeAfterIdle, + origin: origin, + originAddr: originAddr, + localAddr: localAddr, + eyeball: atomic.Pointer[DatagramConn]{}, + // activeAtChan has low capacity. 
It can be full when there are many concurrent read/write. markActive() will + // drop instead of blocking because last active time only needs to be an approximation + activeAtChan: make(chan time.Time, 1), + closeChan: closeChan, + // contextChan is an unbounded channel to help enforce one active migration of a session at a time. + contextChan: make(chan context.Context), + metrics: metrics, + log: &logger, + closeFn: sync.OnceValue(func() error { + // We don't want to block on sending to the close channel if it is already full + select { + case closeChan <- SessionCloseErr: + default: + } + return origin.Close() + }), + } + session.eyeball.Store(&eyeball) + return session +} + +func (s *session) ID() RequestID { + return s.id +} + +func (s *session) RemoteAddr() net.Addr { + return s.originAddr +} + +func (s *session) LocalAddr() net.Addr { + return s.localAddr +} + +func (s *session) ConnectionID() uint8 { + eyeball := *(s.eyeball.Load()) + return eyeball.ID() +} + +func (s *session) Migrate(eyeball DatagramConn, ctx context.Context, logger *zerolog.Logger) { + current := *(s.eyeball.Load()) + // Only migrate if the connection ids are different. + if current.ID() != eyeball.ID() { + s.eyeball.Store(&eyeball) + s.contextChan <- ctx + log := logger.With().Str(logFlowID, s.id.String()).Logger() + s.log = &log + } + // The session is already running so we want to restart the idle timeout since no proxied packets have come down yet. 
+ s.markActive() + s.metrics.MigrateFlow() +} + +func (s *session) Serve(ctx context.Context) error { + go func() { + // QUIC implementation copies data to another buffer before returning https://github.com/quic-go/quic-go/blob/v0.24.0/session.go#L1967-L1975 + // This makes it safe to share readBuffer between iterations + readBuffer := [maxOriginUDPPacketSize + DatagramPayloadHeaderLen]byte{} + // To perform a zero copy write when passing the datagram to the connection, we prepare the buffer with + // the required datagram header information. We can reuse this buffer for this session since the header is the + // same for the each read. + MarshalPayloadHeaderTo(s.id, readBuffer[:DatagramPayloadHeaderLen]) + for { + // Read from the origin UDP socket + n, err := s.origin.Read(readBuffer[DatagramPayloadHeaderLen:]) + if err != nil { + if errors.Is(err, io.EOF) || + errors.Is(err, io.ErrUnexpectedEOF) { + s.log.Debug().Msgf("flow (origin) connection closed: %v", err) + } + s.closeChan <- err + return + } + if n < 0 { + s.log.Warn().Int(logPacketSizeKey, n).Msg("flow (origin) packet read was negative and was dropped") + continue + } + if n > maxDatagramPayloadLen { + s.metrics.PayloadTooLarge() + s.log.Error().Int(logPacketSizeKey, n).Msg("flow (origin) packet read was too large and was dropped") + continue + } + // We need to synchronize on the eyeball in-case that the connection was migrated. This should be rarely a point + // of lock contention, as a migration can only happen during startup of a session before traffic flow. + eyeball := *(s.eyeball.Load()) + // Sending a packet to the session does block on the [quic.Connection], however, this is okay because it + // will cause back-pressure to the kernel buffer if the writes are not fast enough to the edge. + err = eyeball.SendUDPSessionDatagram(readBuffer[:DatagramPayloadHeaderLen+n]) + if err != nil { + s.closeChan <- err + return + } + // Mark the session as active since we proxied a valid packet from the origin. 
+ s.markActive() + } + }() + return s.waitForCloseCondition(ctx, s.closeAfterIdle) +} + +func (s *session) Write(payload []byte) (n int, err error) { + n, err = s.origin.Write(payload) + if err != nil { + s.log.Err(err).Msg("failed to write payload to flow (remote)") + return n, err + } + // Write must return a non-nil error if it returns n < len(p). https://pkg.go.dev/io#Writer + if n < len(payload) { + s.log.Err(io.ErrShortWrite).Msg("failed to write the full payload to flow (remote)") + return n, io.ErrShortWrite + } + // Mark the session as active since we proxied a packet to the origin. + s.markActive() + return n, err +} + +// ResetIdleTimer will restart the current idle timer. +// +// This public method is used to allow operators of sessions the ability to extend the session using information that is +// known external to the session itself. +func (s *session) ResetIdleTimer() { + s.markActive() +} + +// Sends the last active time to the idle checker loop without blocking. activeAtChan will only be full when there +// are many concurrent read/write. It is fine to lose some precision +func (s *session) markActive() { + select { + case s.activeAtChan <- time.Now(): + default: + } +} + +func (s *session) Close() error { + // Make sure that we only close the origin connection once + return s.closeFn() +} + +func (s *session) waitForCloseCondition(ctx context.Context, closeAfterIdle time.Duration) error { + connCtx := ctx + // Closing the session at the end cancels read so Serve() can return + defer s.Close() + if closeAfterIdle == 0 { + // provide deafult is caller doesn't specify one + closeAfterIdle = defaultCloseIdleAfter + } + + checkIdleTimer := time.NewTimer(closeAfterIdle) + defer checkIdleTimer.Stop() + + for { + select { + case <-connCtx.Done(): + return connCtx.Err() + case newContext := <-s.contextChan: + // During migration of a session, we need to make sure that the context of the new connection is used instead + // of the old connection context. 
This will ensure that when the old connection goes away, this session will + // still be active on the existing connection. + connCtx = newContext + continue + case reason := <-s.closeChan: + return reason + case <-checkIdleTimer.C: + // The check idle timer will only return after an idle period since the last active + // operation (read or write). + return newSessionIdleErr(closeAfterIdle) + case <-s.activeAtChan: + // The session is still active, we want to reset the timer. First we have to stop the timer, drain the + // current value and then reset. It's okay if we lose some time on this operation as we don't need to + // close an idle session directly on-time. + if !checkIdleTimer.Stop() { + <-checkIdleTimer.C + } + checkIdleTimer.Reset(closeAfterIdle) + } + } +} diff --git a/quic/v3/session_fuzz_test.go b/quic/v3/session_fuzz_test.go new file mode 100644 index 00000000..0e4952c0 --- /dev/null +++ b/quic/v3/session_fuzz_test.go @@ -0,0 +1,23 @@ +package v3_test + +import ( + "testing" +) + +// FuzzSessionWrite verifies that we don't run into any panics when writing variable sized payloads to the origin. +func FuzzSessionWrite(f *testing.F) { + f.Fuzz(func(t *testing.T, b []byte) { + testSessionWrite(t, b) + }) +} + +// FuzzSessionServe verifies that we don't run into any panics when reading variable sized payloads from the origin. 
+func FuzzSessionServe(f *testing.F) { + f.Fuzz(func(t *testing.T, b []byte) { + // The origin transport read is bound to 1280 bytes + if len(b) > 1280 { + b = b[:1280] + } + testSessionServe_Origin(t, b) + }) +} diff --git a/quic/v3/session_test.go b/quic/v3/session_test.go new file mode 100644 index 00000000..b739ca2d --- /dev/null +++ b/quic/v3/session_test.go @@ -0,0 +1,394 @@ +package v3_test + +import ( + "context" + "errors" + "io" + "net" + "net/netip" + "slices" + "testing" + "time" + + "github.com/fortytw2/leaktest" + "github.com/rs/zerolog" + + v3 "github.com/cloudflare/cloudflared/quic/v3" +) + +var ( + expectedContextCanceled = errors.New("expected context canceled") + + testOriginAddr = net.UDPAddrFromAddrPort(netip.MustParseAddrPort("127.0.0.1:0")) + testLocalAddr = net.UDPAddrFromAddrPort(netip.MustParseAddrPort("127.0.0.1:0")) +) + +func TestSessionNew(t *testing.T) { + log := zerolog.Nop() + session := v3.NewSession(testRequestID, 5*time.Second, nil, testOriginAddr, testLocalAddr, &noopEyeball{}, &noopMetrics{}, &log) + if testRequestID != session.ID() { + t.Fatalf("session id doesn't match: %s != %s", testRequestID, session.ID()) + } +} + +func testSessionWrite(t *testing.T, payload []byte) { + log := zerolog.Nop() + origin, server := net.Pipe() + defer origin.Close() + defer server.Close() + // Start origin server read + serverRead := make(chan []byte, 1) + go func() { + read := make([]byte, 1500) + server.Read(read[:]) + serverRead <- read + }() + // Create session and write to origin + session := v3.NewSession(testRequestID, 5*time.Second, origin, testOriginAddr, testLocalAddr, &noopEyeball{}, &noopMetrics{}, &log) + n, err := session.Write(payload) + defer session.Close() + if err != nil { + t.Fatal(err) + } + if n != len(payload) { + t.Fatal("unable to write the whole payload") + } + + read := <-serverRead + if !slices.Equal(payload, read[:len(payload)]) { + t.Fatal("payload provided from origin and read value are not the same") + } +} + 
+func TestSessionWrite_Max(t *testing.T) { + defer leaktest.Check(t)() + payload := makePayload(1280) + testSessionWrite(t, payload) +} + +func TestSessionWrite_Min(t *testing.T) { + defer leaktest.Check(t)() + payload := makePayload(0) + testSessionWrite(t, payload) +} + +func TestSessionServe_OriginMax(t *testing.T) { + defer leaktest.Check(t)() + payload := makePayload(1280) + testSessionServe_Origin(t, payload) +} + +func TestSessionServe_OriginMin(t *testing.T) { + defer leaktest.Check(t)() + payload := makePayload(0) + testSessionServe_Origin(t, payload) +} + +func testSessionServe_Origin(t *testing.T, payload []byte) { + log := zerolog.Nop() + origin, server := net.Pipe() + defer origin.Close() + defer server.Close() + eyeball := newMockEyeball() + session := v3.NewSession(testRequestID, 3*time.Second, origin, testOriginAddr, testLocalAddr, &eyeball, &noopMetrics{}, &log) + defer session.Close() + + ctx, cancel := context.WithCancelCause(context.Background()) + defer cancel(context.Canceled) + done := make(chan error) + go func() { + done <- session.Serve(ctx) + }() + + // Write from the origin server + _, err := server.Write(payload) + if err != nil { + t.Fatal(err) + } + + select { + case data := <-eyeball.recvData: + // check received data matches provided from origin + expectedData := makePayload(1500) + v3.MarshalPayloadHeaderTo(testRequestID, expectedData[:]) + copy(expectedData[17:], payload) + if !slices.Equal(expectedData[:v3.DatagramPayloadHeaderLen+len(payload)], data) { + t.Fatal("expected datagram did not equal expected") + } + cancel(expectedContextCanceled) + case err := <-ctx.Done(): + // we expect the payload to return before the context to cancel on the session + t.Fatal(err) + } + + err = <-done + if !errors.Is(err, context.Canceled) { + t.Fatal(err) + } + if !errors.Is(context.Cause(ctx), expectedContextCanceled) { + t.Fatal(err) + } +} + +func TestSessionServe_OriginTooLarge(t *testing.T) { + defer leaktest.Check(t)() + log := 
zerolog.Nop() + eyeball := newMockEyeball() + payload := makePayload(1281) + origin, server := net.Pipe() + defer origin.Close() + defer server.Close() + session := v3.NewSession(testRequestID, 2*time.Second, origin, testOriginAddr, testLocalAddr, &eyeball, &noopMetrics{}, &log) + defer session.Close() + + done := make(chan error) + go func() { + done <- session.Serve(context.Background()) + }() + + // Attempt to write a payload too large from the origin + _, err := server.Write(payload) + if err != nil { + t.Fatal(err) + } + + select { + case data := <-eyeball.recvData: + // we never expect a read to make it here because the origin provided a payload that is too large + // for cloudflared to proxy and it will drop it. + t.Fatalf("we should never proxy a payload of this size: %d", len(data)) + case err := <-done: + if !errors.Is(err, v3.SessionIdleErr{}) { + t.Error(err) + } + } +} + +func TestSessionServe_Migrate(t *testing.T) { + defer leaktest.Check(t)() + log := zerolog.Nop() + eyeball := newMockEyeball() + pipe1, pipe2 := net.Pipe() + session := v3.NewSession(testRequestID, 2*time.Second, pipe2, testOriginAddr, testLocalAddr, &eyeball, &noopMetrics{}, &log) + defer session.Close() + + done := make(chan error) + eyeball1Ctx, cancel := context.WithCancelCause(context.Background()) + go func() { + done <- session.Serve(eyeball1Ctx) + }() + + // Migrate the session to a new connection before origin sends data + eyeball2 := newMockEyeball() + eyeball2.connID = 1 + eyeball2Ctx := context.Background() + session.Migrate(&eyeball2, eyeball2Ctx, &log) + + // Cancel the origin eyeball context; this should not cancel the session + contextCancelErr := errors.New("context canceled for first eyeball connection") + cancel(contextCancelErr) + select { + case <-done: + t.Fatalf("expected session to still be running") + default: + } + if context.Cause(eyeball1Ctx) != contextCancelErr { + t.Fatalf("first eyeball context should be cancelled manually: %+v", 
context.Cause(eyeball1Ctx)) + } + + // Origin sends data + payload2 := []byte{0xde} + pipe1.Write(payload2) + + // Expect write to eyeball2 + data := <-eyeball2.recvData + if len(data) <= 17 || !slices.Equal(payload2, data[17:]) { + t.Fatalf("expected data to write to eyeball2 after migration: %+v", data) + } + + select { + case data := <-eyeball.recvData: + t.Fatalf("expected no data to write to eyeball1 after migration: %+v", data) + default: + } + + err := <-done + if !errors.Is(err, v3.SessionIdleErr{}) { + t.Error(err) + } + if eyeball2Ctx.Err() != nil { + t.Fatalf("second eyeball context should be not be cancelled") + } +} + +func TestSessionServe_Migrate_CloseContext2(t *testing.T) { + defer leaktest.Check(t)() + log := zerolog.Nop() + eyeball := newMockEyeball() + pipe1, pipe2 := net.Pipe() + session := v3.NewSession(testRequestID, 2*time.Second, pipe2, testOriginAddr, testLocalAddr, &eyeball, &noopMetrics{}, &log) + defer session.Close() + + done := make(chan error) + eyeball1Ctx, cancel := context.WithCancelCause(context.Background()) + go func() { + done <- session.Serve(eyeball1Ctx) + }() + + // Migrate the session to a new connection before origin sends data + eyeball2 := newMockEyeball() + eyeball2.connID = 1 + eyeball2Ctx, cancel2 := context.WithCancelCause(context.Background()) + session.Migrate(&eyeball2, eyeball2Ctx, &log) + + // Cancel the origin eyeball context; this should not cancel the session + contextCancelErr := errors.New("context canceled for first eyeball connection") + cancel(contextCancelErr) + select { + case <-done: + t.Fatalf("expected session to still be running") + default: + } + if context.Cause(eyeball1Ctx) != contextCancelErr { + t.Fatalf("first eyeball context should be cancelled manually: %+v", context.Cause(eyeball1Ctx)) + } + + // Origin sends data + payload2 := []byte{0xde} + pipe1.Write(payload2) + + // Expect write to eyeball2 + data := <-eyeball2.recvData + if len(data) <= 17 || !slices.Equal(payload2, data[17:]) { + 
t.Fatalf("expected data to write to eyeball2 after migration: %+v", data) + } + + select { + case data := <-eyeball.recvData: + t.Fatalf("expected no data to write to eyeball1 after migration: %+v", data) + default: + } + + // Close the connection2 context manually + contextCancel2Err := errors.New("context canceled for second eyeball connection") + cancel2(contextCancel2Err) + err := <-done + if err != context.Canceled { + t.Fatalf("session Serve should be done: %+v", err) + } + if context.Cause(eyeball2Ctx) != contextCancel2Err { + t.Fatalf("second eyeball context should have been cancelled manually: %+v", context.Cause(eyeball2Ctx)) + } +} + +func TestSessionClose_Multiple(t *testing.T) { + defer leaktest.Check(t)() + log := zerolog.Nop() + origin, server := net.Pipe() + defer origin.Close() + defer server.Close() + session := v3.NewSession(testRequestID, 5*time.Second, origin, testOriginAddr, testLocalAddr, &noopEyeball{}, &noopMetrics{}, &log) + err := session.Close() + if err != nil { + t.Fatal(err) + } + b := [1500]byte{} + _, err = server.Read(b[:]) + if !errors.Is(err, io.EOF) { + t.Fatalf("origin server connection should be closed: %s", err) + } + // subsequent closes shouldn't call close again or cause any errors + err = session.Close() + if err != nil { + t.Fatal(err) + } + _, err = server.Read(b[:]) + if !errors.Is(err, io.EOF) { + t.Fatalf("origin server connection should still be closed: %s", err) + } +} + +func TestSessionServe_IdleTimeout(t *testing.T) { + defer leaktest.Check(t)() + log := zerolog.Nop() + origin, server := net.Pipe() + defer origin.Close() + defer server.Close() + closeAfterIdle := 2 * time.Second + session := v3.NewSession(testRequestID, closeAfterIdle, origin, testOriginAddr, testLocalAddr, &noopEyeball{}, &noopMetrics{}, &log) + err := session.Serve(context.Background()) + if !errors.Is(err, v3.SessionIdleErr{}) { + t.Fatal(err) + } + // session should be closed + b := [1500]byte{} + _, err = server.Read(b[:]) + if 
!errors.Is(err, io.EOF) { + t.Fatalf("session should be closed after Serve returns") + } + // closing a session again should not return an error + err = session.Close() + if err != nil { + t.Fatal(err) + } +} + +func TestSessionServe_ParentContextCanceled(t *testing.T) { + defer leaktest.Check(t)() + log := zerolog.Nop() + origin, server := net.Pipe() + defer origin.Close() + defer server.Close() + closeAfterIdle := 10 * time.Second + + session := v3.NewSession(testRequestID, closeAfterIdle, origin, testOriginAddr, testLocalAddr, &noopEyeball{}, &noopMetrics{}, &log) + ctx, cancel := context.WithTimeout(context.Background(), 2*time.Second) + defer cancel() + err := session.Serve(ctx) + if !errors.Is(err, context.DeadlineExceeded) { + t.Fatal(err) + } + // session should be closed + b := [1500]byte{} + _, err = server.Read(b[:]) + if !errors.Is(err, io.EOF) { + t.Fatalf("session should be closed after Serve returns") + } + // closing a session again should not return an error + err = session.Close() + if err != nil { + t.Fatal(err) + } +} + +func TestSessionServe_ReadErrors(t *testing.T) { + defer leaktest.Check(t)() + log := zerolog.Nop() + origin := newTestErrOrigin(net.ErrClosed, nil) + session := v3.NewSession(testRequestID, 30*time.Second, &origin, testOriginAddr, testLocalAddr, &noopEyeball{}, &noopMetrics{}, &log) + err := session.Serve(context.Background()) + if !errors.Is(err, net.ErrClosed) { + t.Fatal(err) + } +} + +type testErrOrigin struct { + readErr error + writeErr error +} + +func newTestErrOrigin(readErr error, writeErr error) testErrOrigin { + return testErrOrigin{readErr, writeErr} +} + +func (o *testErrOrigin) Read(p []byte) (n int, err error) { + return 0, o.readErr +} + +func (o *testErrOrigin) Write(p []byte) (n int, err error) { + return len(p), o.writeErr +} + +func (o *testErrOrigin) Close() error { + return nil +} diff --git a/release_pkgs.py b/release_pkgs.py index 1f31e548..8d1f5c6e 100644 --- a/release_pkgs.py +++ b/release_pkgs.py @@ 
-113,7 +113,7 @@ class PkgCreator: def create_rpm_pkgs(self, artifacts_path, gpg_key_name): self._setup_rpm_pkg_directories(artifacts_path, gpg_key_name) - p = Popen(["createrepo", "./rpm"], stdout=PIPE, stderr=PIPE) + p = Popen(["createrepo_c", "./rpm"], stdout=PIPE, stderr=PIPE) out, err = p.communicate() if p.returncode != 0: print(f"create rpm_pkgs result => {out}, {err}") diff --git a/ssh_server_tests/Dockerfile b/ssh_server_tests/Dockerfile deleted file mode 100644 index 0f4a1629..00000000 --- a/ssh_server_tests/Dockerfile +++ /dev/null @@ -1,14 +0,0 @@ -FROM python:3-buster - -RUN wget https://github.com/cloudflare/cloudflared/releases/latest/download/cloudflared-linux-amd64.deb \ - && dpkg -i cloudflared-linux-amd64.deb - -RUN pip install pexpect - -COPY tests.py . -COPY ssh /root/.ssh -RUN chmod 600 /root/.ssh/id_rsa - -ARG SSH_HOSTNAME -RUN bash -c 'sed -i "s/{{hostname}}/${SSH_HOSTNAME}/g" /root/.ssh/authorized_keys_config' -RUN bash -c 'sed -i "s/{{hostname}}/${SSH_HOSTNAME}/g" /root/.ssh/short_lived_cert_config' diff --git a/ssh_server_tests/README.md b/ssh_server_tests/README.md deleted file mode 100644 index 391d9040..00000000 --- a/ssh_server_tests/README.md +++ /dev/null @@ -1,23 +0,0 @@ -# Cloudflared SSH server smoke tests - -Runs several tests in a docker container against a server that is started out of band of these tests. -Cloudflared token also needs to be retrieved out of band. 
-SSH server hostname and user need to be configured in a docker environment file - - -## Running tests - -* Build cloudflared: -make cloudflared - -* Start server: -sudo ./cloudflared tunnel --hostname HOSTNAME --ssh-server - -* Fetch token: -./cloudflared access login HOSTNAME - -* Create docker env file: -echo "SSH_HOSTNAME=HOSTNAME\nSSH_USER=USERNAME\n" > ssh_server_tests/.env - -* Run tests: -make test-ssh-server diff --git a/ssh_server_tests/docker-compose.yml b/ssh_server_tests/docker-compose.yml deleted file mode 100644 index e292dc27..00000000 --- a/ssh_server_tests/docker-compose.yml +++ /dev/null @@ -1,18 +0,0 @@ -version: "3.1" - -services: - ssh_test: - build: - context: . - args: - - SSH_HOSTNAME=${SSH_HOSTNAME} - volumes: - - "~/.cloudflared/:/root/.cloudflared" - env_file: - - .env - environment: - - AUTHORIZED_KEYS_SSH_CONFIG=/root/.ssh/authorized_keys_config - - SHORT_LIVED_CERT_SSH_CONFIG=/root/.ssh/short_lived_cert_config - - REMOTE_SCP_FILENAME=scp_test.txt - - ROOT_ONLY_TEST_FILE_PATH=~/permission_test.txt - entrypoint: "python tests.py" diff --git a/ssh_server_tests/ssh/authorized_keys_config b/ssh_server_tests/ssh/authorized_keys_config deleted file mode 100644 index 288c0b70..00000000 --- a/ssh_server_tests/ssh/authorized_keys_config +++ /dev/null @@ -1,5 +0,0 @@ -Host * - AddressFamily inet - -Host {{hostname}} - ProxyCommand /usr/local/bin/cloudflared access ssh --hostname %h diff --git a/ssh_server_tests/ssh/id_rsa b/ssh_server_tests/ssh/id_rsa deleted file mode 100644 index 16e178b1..00000000 --- a/ssh_server_tests/ssh/id_rsa +++ /dev/null @@ -1,49 +0,0 @@ ------BEGIN OPENSSH PRIVATE KEY----- -b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn -NhAAAAAwEAAQAAAgEAvi26NDQ8cYTTztqPe9ZgF5HR/rIo5FoDgL5NbbZKW6h0txP9Fd8s -id9Bgmo+aGkeM327tPVVMQ6UFmdRksOCIDWQDjNLF8b6S+Fu95tvMKSbGreRoR32OvgZKV -I6KmOsF4z4GIv9naPplZswtKEUhSSI+/gPdAs9wfwalqZ77e82QJ727bYMeC3lzuoT+KBI 
-dYufJ4OQhLtpHrqhB5sn7s6+oCv/u85GSln5SIC18Hi2t9lW4tgb5tH8P0kEDDWGfPS5ok -qGi4kFTvwBXOCS2r4dhi5hRkpP7PqG4np0OCfvK5IRRJ27fCnj0loc+puZJAxnPMbuJr64 -vwxRx78PM/V0PDUsl0P6aR/vbe0XmF9FGqbWf2Tar1p4r6C9/bMzcDz8seYT8hzLIHP3+R -l1hdlsTLm+1EzhaExKId+tjXegKGG4nU24h6qHEnRxLQDMwEsdkfj4E1pVypZJXVyNj99D -o84vi0EUnu7R4HmQb/C+Pu7qMDtLT3Zk7O5Mg4LQ+cTz9V0noYEAyG46nAB4U/nJzBnV1J -+aAdpioHmUAYhLYlQ9Kiy7LCJi92g9Wqa4wxMKxBbO5ZeH++p2p2lUi/oQNqx/2dLYFmy0 -wxvJHbZIhAaFbOeCvHg1ucIAQznli2jOr2qoB+yKRRPAp/3NXnZg1v7ce2CkwiAD52wjtC -kAAAdILMJUeyzCVHsAAAAHc3NoLXJzYQAAAgEAvi26NDQ8cYTTztqPe9ZgF5HR/rIo5FoD -gL5NbbZKW6h0txP9Fd8sid9Bgmo+aGkeM327tPVVMQ6UFmdRksOCIDWQDjNLF8b6S+Fu95 -tvMKSbGreRoR32OvgZKVI6KmOsF4z4GIv9naPplZswtKEUhSSI+/gPdAs9wfwalqZ77e82 -QJ727bYMeC3lzuoT+KBIdYufJ4OQhLtpHrqhB5sn7s6+oCv/u85GSln5SIC18Hi2t9lW4t -gb5tH8P0kEDDWGfPS5okqGi4kFTvwBXOCS2r4dhi5hRkpP7PqG4np0OCfvK5IRRJ27fCnj -0loc+puZJAxnPMbuJr64vwxRx78PM/V0PDUsl0P6aR/vbe0XmF9FGqbWf2Tar1p4r6C9/b -MzcDz8seYT8hzLIHP3+Rl1hdlsTLm+1EzhaExKId+tjXegKGG4nU24h6qHEnRxLQDMwEsd -kfj4E1pVypZJXVyNj99Do84vi0EUnu7R4HmQb/C+Pu7qMDtLT3Zk7O5Mg4LQ+cTz9V0noY -EAyG46nAB4U/nJzBnV1J+aAdpioHmUAYhLYlQ9Kiy7LCJi92g9Wqa4wxMKxBbO5ZeH++p2 -p2lUi/oQNqx/2dLYFmy0wxvJHbZIhAaFbOeCvHg1ucIAQznli2jOr2qoB+yKRRPAp/3NXn -Zg1v7ce2CkwiAD52wjtCkAAAADAQABAAACAQCbnVsyAFQ9J00Rg/HIiUATyTQlzq57O9SF -8jH1RiZOHedzLx32WaleH5rBFiJ+2RTnWUjQ57aP77fpJR2wk93UcT+w/vPBPwXsNUjRvx -Qan3ZzRCYbyiKDWiNslmYV7X0RwD36CAK8jTVDP7t48h2SXLTiSLaMY+5i3uD6yLu7k/O2 -qNyw4jgN1rCmwQ8acD0aQec3NAZ7NcbsaBX/3Uutsup0scwOZtlJWZoLY5Z8cKpCgcsAz4 -j1NHnNZvey7dFgSffj/ktdvf7kBH0w/GnuJ4aNF0Jte70u0kiw5TZYBQVFh74tgUu6a6SJ -qUbxIYUL5EJNjxGsDn+phHEemw3aMv0CwZG6Tqaionlna7bLsl9Bg1HTGclczVWx8uqC+M -6agLmkhYCHG0rVj8h5smjXAQXtmvIDVYDOlJZZoF9VAOCj6QfmJUH1NAGpCs1HDHbeOxGA -OLCh4d3F4rScPqhGdtSt4W13VFIvXn2Qqoz9ufepZsee1SZqpcerxywx2wN9ZAzu+X8lTN -i+TA2B3vWpqqucOEsp4JwDN+VMKZqKUGUDWcm/eHSaG6wq0q734LUlgM85TjaIg8QsNtWV -giB1nWwsYIuH4rsFNFGEwURYdGBcw6idH0GZ7I4RaIB5F9oOza1d601E0APHYrtnx9yOiK 
-nOtJ+5ZmVZovaDRfu1aQAAAQBU/EFaNUzoVhO04pS2L6BlByt963bOIsSJhdlEzek5AAli -eaf1S/PD6xWCc0IGY+GZE0HPbhsKYanjqOpWldcA2T7fzf4oz4vFBfUkPYo/MLSlLCYsDd -IH3wBkCssnfR5EkzNgxnOvq646Nl64BMvxwSIXGPktdq9ZALxViwricSRzCFURnh5vLHWU -wBzSgAA0UlZ9E64GtAv066+AoZCp83GhTLRC4o0naE2e/K4op4BCFHLrZ8eXmDRK3NJj80 -Vkn+uhrk+SHmbjIhmS57Pv9p8TWyRvemph/nMUuZGKBUu2X+JQxggck0KigIrXjsmciCsM -BIM3mYDDfjYbyVhTAAABAQDkV8O1bWUsAIqk7RU+iDZojN5kaO+zUvj1TafX8QX1sY6pu4 -Z2cfSEka1532BaehM95bQm7BCPw4cYg56XidmCQTZ9WaWqxVrOo48EKXUtZMZx6nKFOKlq -MT2XTMnGT9n7kFCfEjSVkAjuJ9ZTFLOaoXAaVRnxeHQwOKaup5KKP9GSzNIw328U+96s3V -WKHeT4pMjHBccgW/qX/tRRidZw5in5uBC9Ew5y3UACFTkNOnhUwVfyUNbBZJ2W36msQ3KD -AN7nOrQHqhd3NFyCEy2ovIAKVBacr/VEX6EsRUshIehJzz8EY9f3kXL7WT2QDoz2giPeBJ -HJdEpXt43UpszjAAABAQDVNpqNdHUlCs9XnbIvc6ZRrNh79wt65YFfvh/QEuA33KnA6Ri6 -EgnV5IdUWXS/UFaYcm2udydrBpVIVifSYl3sioHBylpri23BEy38PKwVXvghUtfpN6dWGn -NZUG25fQPtIzqi+lo953ZjIj+Adi17AeVv4P4NiLrZeM9lXfWf2pEPOecxXs1IwAf9IiDQ -WepAwRLsu42eEnHA+DSJPZUkSbISfM5X345k0g6EVATX/yLL3CsqClPzPtsqjh6rbEfFg3 -2OfIMcWV77gOlGWGQ+bUHc8kV6xJqV9QVacLWzfLvIqHF0wQMf8WLOVHEzkfiq4VjwhVqr -/+FFvljm5nSDAAAAEW1pa2VAQzAyWTUwVEdKR0g4AQ== ------END OPENSSH PRIVATE KEY----- diff --git a/ssh_server_tests/ssh/id_rsa.pub b/ssh_server_tests/ssh/id_rsa.pub deleted file mode 100644 index 024f80ee..00000000 --- a/ssh_server_tests/ssh/id_rsa.pub +++ /dev/null @@ -1 +0,0 @@ -ssh-rsa 
AAAAB3NzaC1yc2EAAAADAQABAAACAQC+Lbo0NDxxhNPO2o971mAXkdH+sijkWgOAvk1ttkpbqHS3E/0V3yyJ30GCaj5oaR4zfbu09VUxDpQWZ1GSw4IgNZAOM0sXxvpL4W73m28wpJsat5GhHfY6+BkpUjoqY6wXjPgYi/2do+mVmzC0oRSFJIj7+A90Cz3B/BqWpnvt7zZAnvbttgx4LeXO6hP4oEh1i58ng5CEu2keuqEHmyfuzr6gK/+7zkZKWflIgLXweLa32Vbi2Bvm0fw/SQQMNYZ89LmiSoaLiQVO/AFc4JLavh2GLmFGSk/s+obienQ4J+8rkhFEnbt8KePSWhz6m5kkDGc8xu4mvri/DFHHvw8z9XQ8NSyXQ/ppH+9t7ReYX0UaptZ/ZNqvWnivoL39szNwPPyx5hPyHMsgc/f5GXWF2WxMub7UTOFoTEoh362Nd6AoYbidTbiHqocSdHEtAMzASx2R+PgTWlXKlkldXI2P30Ojzi+LQRSe7tHgeZBv8L4+7uowO0tPdmTs7kyDgtD5xPP1XSehgQDIbjqcAHhT+cnMGdXUn5oB2mKgeZQBiEtiVD0qLLssImL3aD1aprjDEwrEFs7ll4f76nanaVSL+hA2rH/Z0tgWbLTDG8kdtkiEBoVs54K8eDW5wgBDOeWLaM6vaqgH7IpFE8Cn/c1edmDW/tx7YKTCIAPnbCO0KQ== mike@C02Y50TGJGH8 diff --git a/ssh_server_tests/ssh/short_lived_cert_config b/ssh_server_tests/ssh/short_lived_cert_config deleted file mode 100644 index e29356fe..00000000 --- a/ssh_server_tests/ssh/short_lived_cert_config +++ /dev/null @@ -1,11 +0,0 @@ -Host * - AddressFamily inet - -Host {{hostname}} - ProxyCommand bash -c '/usr/local/bin/cloudflared access ssh-gen --hostname %h; ssh -F /root/.ssh/short_lived_cert_config -tt %r@cfpipe-{{hostname}} >&2 <&1' - -Host cfpipe-{{hostname}} - HostName {{hostname}} - ProxyCommand /usr/local/bin/cloudflared access ssh --hostname %h - IdentityFile ~/.cloudflared/{{hostname}}-cf_key - CertificateFile ~/.cloudflared/{{hostname}}-cf_key-cert.pub diff --git a/ssh_server_tests/tests.py b/ssh_server_tests/tests.py deleted file mode 100644 index d27fcd7f..00000000 --- a/ssh_server_tests/tests.py +++ /dev/null @@ -1,195 +0,0 @@ -""" -Cloudflared Integration tests -""" - -import unittest -import subprocess -import os -import tempfile -from contextlib import contextmanager - -from pexpect import pxssh - - -class TestSSHBase(unittest.TestCase): - """ - SSH test base class containing constants and helper funcs - """ - - HOSTNAME = os.environ["SSH_HOSTNAME"] - SSH_USER = os.environ["SSH_USER"] - SSH_TARGET = 
f"{SSH_USER}@{HOSTNAME}" - AUTHORIZED_KEYS_SSH_CONFIG = os.environ["AUTHORIZED_KEYS_SSH_CONFIG"] - SHORT_LIVED_CERT_SSH_CONFIG = os.environ["SHORT_LIVED_CERT_SSH_CONFIG"] - SSH_OPTIONS = {"StrictHostKeyChecking": "no"} - - @classmethod - def get_ssh_command(cls, pty=True): - """ - Return ssh command arg list. If pty is true, a PTY is forced for the session. - """ - cmd = [ - "ssh", - "-o", - "StrictHostKeyChecking=no", - "-F", - cls.AUTHORIZED_KEYS_SSH_CONFIG, - cls.SSH_TARGET, - ] - if not pty: - cmd += ["-T"] - else: - cmd += ["-tt"] - - return cmd - - @classmethod - @contextmanager - def ssh_session_manager(cls, *args, **kwargs): - """ - Context manager for interacting with a pxssh session. - Disables pty echo on the remote server and ensures session is terminated afterward. - """ - session = pxssh.pxssh(options=cls.SSH_OPTIONS) - - session.login( - cls.HOSTNAME, - username=cls.SSH_USER, - original_prompt=r"[#@$]", - ssh_config=kwargs.get("ssh_config", cls.AUTHORIZED_KEYS_SSH_CONFIG), - ssh_tunnels=kwargs.get("ssh_tunnels", {}), - ) - try: - session.sendline("stty -echo") - session.prompt() - yield session - finally: - session.logout() - - @staticmethod - def get_command_output(session, cmd): - """ - Executes command on remote ssh server and waits for prompt. - Returns command output - """ - session.sendline(cmd) - session.prompt() - return session.before.decode().strip() - - def exec_command(self, cmd, shell=False): - """ - Executes command locally. Raises Assertion error for non-zero return code. 
- Returns stdout and stderr - """ - proc = subprocess.Popen( - cmd, stderr=subprocess.PIPE, stdout=subprocess.PIPE, shell=shell - ) - raw_out, raw_err = proc.communicate() - - out = raw_out.decode() - err = raw_err.decode() - self.assertEqual(proc.returncode, 0, msg=f"stdout: {out} stderr: {err}") - return out.strip(), err.strip() - - -class TestSSHCommandExec(TestSSHBase): - """ - Tests inline ssh command exec - """ - - # Name of file to be downloaded over SCP on remote server. - REMOTE_SCP_FILENAME = os.environ["REMOTE_SCP_FILENAME"] - - @classmethod - def get_scp_base_command(cls): - return [ - "scp", - "-o", - "StrictHostKeyChecking=no", - "-v", - "-F", - cls.AUTHORIZED_KEYS_SSH_CONFIG, - ] - - @unittest.skip( - "This creates files on the remote. Should be skipped until server is dockerized." - ) - def test_verbose_scp_sink_mode(self): - with tempfile.NamedTemporaryFile() as fl: - self.exec_command( - self.get_scp_base_command() + [fl.name, f"{self.SSH_TARGET}:"] - ) - - def test_verbose_scp_source_mode(self): - with tempfile.TemporaryDirectory() as tmpdirname: - self.exec_command( - self.get_scp_base_command() - + [f"{self.SSH_TARGET}:{self.REMOTE_SCP_FILENAME}", tmpdirname] - ) - local_filename = os.path.join(tmpdirname, self.REMOTE_SCP_FILENAME) - - self.assertTrue(os.path.exists(local_filename)) - self.assertTrue(os.path.getsize(local_filename) > 0) - - def test_pty_command(self): - base_cmd = self.get_ssh_command() - - out, _ = self.exec_command(base_cmd + ["whoami"]) - self.assertEqual(out.strip().lower(), self.SSH_USER.lower()) - - out, _ = self.exec_command(base_cmd + ["tty"]) - self.assertNotEqual(out, "not a tty") - - def test_non_pty_command(self): - base_cmd = self.get_ssh_command(pty=False) - - out, _ = self.exec_command(base_cmd + ["whoami"]) - self.assertEqual(out.strip().lower(), self.SSH_USER.lower()) - - out, _ = self.exec_command(base_cmd + ["tty"]) - self.assertEqual(out, "not a tty") - - -class TestSSHShell(TestSSHBase): - """ - Tests 
interactive SSH shell - """ - - # File path to a file on the remote server with root only read privileges. - ROOT_ONLY_TEST_FILE_PATH = os.environ["ROOT_ONLY_TEST_FILE_PATH"] - - def test_ssh_pty(self): - with self.ssh_session_manager() as session: - - # Test shell launched as correct user - username = self.get_command_output(session, "whoami") - self.assertEqual(username.lower(), self.SSH_USER.lower()) - - # Test USER env variable set - user_var = self.get_command_output(session, "echo $USER") - self.assertEqual(user_var.lower(), self.SSH_USER.lower()) - - # Test HOME env variable set to true user home. - home_env = self.get_command_output(session, "echo $HOME") - pwd = self.get_command_output(session, "pwd") - self.assertEqual(pwd, home_env) - - # Test shell launched in correct user home dir. - self.assertIn(username, pwd) - - # Ensure shell launched with correct user's permissions and privs. - # Can't read root owned 0700 files. - output = self.get_command_output( - session, f"cat {self.ROOT_ONLY_TEST_FILE_PATH}" - ) - self.assertIn("Permission denied", output) - - def test_short_lived_cert_auth(self): - with self.ssh_session_manager( - ssh_config=self.SHORT_LIVED_CERT_SSH_CONFIG - ) as session: - username = self.get_command_output(session, "whoami") - self.assertEqual(username.lower(), self.SSH_USER.lower()) - - -unittest.main() diff --git a/h2mux/booleanfuse.go b/supervisor/fuse.go similarity index 69% rename from h2mux/booleanfuse.go rename to supervisor/fuse.go index 8407ecc7..3a143437 100644 --- a/h2mux/booleanfuse.go +++ b/supervisor/fuse.go @@ -1,23 +1,23 @@ -package h2mux +package supervisor import "sync" -// BooleanFuse is a data structure that can be set once to a particular value using Fuse(value). +// booleanFuse is a data structure that can be set once to a particular value using Fuse(value). // Subsequent calls to Fuse() will have no effect. 
-type BooleanFuse struct { +type booleanFuse struct { value int32 mu sync.Mutex cond *sync.Cond } -func NewBooleanFuse() *BooleanFuse { - f := &BooleanFuse{} +func newBooleanFuse() *booleanFuse { + f := &booleanFuse{} f.cond = sync.NewCond(&f.mu) return f } // Value gets the value -func (f *BooleanFuse) Value() bool { +func (f *booleanFuse) Value() bool { // 0: unset // 1: set true // 2: set false @@ -26,7 +26,7 @@ func (f *BooleanFuse) Value() bool { return f.value == 1 } -func (f *BooleanFuse) Fuse(result bool) { +func (f *booleanFuse) Fuse(result bool) { f.mu.Lock() defer f.mu.Unlock() newValue := int32(2) @@ -40,7 +40,7 @@ func (f *BooleanFuse) Fuse(result bool) { } // Await blocks until Fuse has been called at least once. -func (f *BooleanFuse) Await() bool { +func (f *booleanFuse) Await() bool { f.mu.Lock() defer f.mu.Unlock() for f.value == 0 { diff --git a/supervisor/supervisor.go b/supervisor/supervisor.go index 8dde4e76..8736963c 100644 --- a/supervisor/supervisor.go +++ b/supervisor/supervisor.go @@ -7,12 +7,15 @@ import ( "strings" "time" + "github.com/prometheus/client_golang/prometheus" "github.com/quic-go/quic-go" "github.com/rs/zerolog" "github.com/cloudflare/cloudflared/connection" "github.com/cloudflare/cloudflared/edgediscovery" + "github.com/cloudflare/cloudflared/ingress" "github.com/cloudflare/cloudflared/orchestration" + v3 "github.com/cloudflare/cloudflared/quic/v3" "github.com/cloudflare/cloudflared/retry" "github.com/cloudflare/cloudflared/signal" "github.com/cloudflare/cloudflared/tunnelstate" @@ -80,9 +83,14 @@ func NewSupervisor(config *TunnelConfig, orchestrator *orchestration.Orchestrato edgeAddrHandler := NewIPAddrFallback(config.MaxEdgeAddrRetries) edgeBindAddr := config.EdgeBindAddr + datagramMetrics := v3.NewMetrics(prometheus.DefaultRegisterer) + sessionManager := v3.NewSessionManager(datagramMetrics, config.Log, ingress.DialUDPAddrPort) + edgeTunnelServer := EdgeTunnelServer{ config: config, orchestrator: orchestrator, + 
sessionManager: sessionManager, + datagramMetrics: datagramMetrics, edgeAddrs: edgeIPs, edgeAddrHandler: edgeAddrHandler, edgeBindAddr: edgeBindAddr, @@ -111,9 +119,9 @@ func (s *Supervisor) Run( ctx context.Context, connectedSignal *signal.Signal, ) error { - if s.config.PacketConfig != nil { + if s.config.ICMPRouterServer != nil { go func() { - if err := s.config.PacketConfig.ICMPRouter.Serve(ctx); err != nil { + if err := s.config.ICMPRouterServer.Serve(ctx); err != nil { if errors.Is(err, net.ErrClosed) { s.log.Logger().Info().Err(err).Msg("icmp router terminated") } else { diff --git a/supervisor/tunnel.go b/supervisor/tunnel.go index 85637798..09983e11 100644 --- a/supervisor/tunnel.go +++ b/supervisor/tunnel.go @@ -5,7 +5,9 @@ import ( "crypto/tls" "fmt" "net" + "net/netip" "runtime/debug" + "slices" "strings" "sync" "time" @@ -19,11 +21,11 @@ import ( "github.com/cloudflare/cloudflared/edgediscovery" "github.com/cloudflare/cloudflared/edgediscovery/allregions" "github.com/cloudflare/cloudflared/features" - "github.com/cloudflare/cloudflared/h2mux" "github.com/cloudflare/cloudflared/ingress" "github.com/cloudflare/cloudflared/management" "github.com/cloudflare/cloudflared/orchestration" quicpogs "github.com/cloudflare/cloudflared/quic" + v3 "github.com/cloudflare/cloudflared/quic/v3" "github.com/cloudflare/cloudflared/retry" "github.com/cloudflare/cloudflared/signal" "github.com/cloudflare/cloudflared/tunnelrpc/pogs" @@ -61,7 +63,7 @@ type TunnelConfig struct { NamedTunnel *connection.TunnelProperties ProtocolSelector connection.ProtocolSelector EdgeTLSConfigs map[connection.Protocol]*tls.Config - PacketConfig *ingress.GlobalRouterConfig + ICMPRouterServer ingress.ICMPRouterServer RPCTimeout time.Duration WriteStreamTimeout time.Duration @@ -87,14 +89,6 @@ func (c *TunnelConfig) connectionOptions(originLocalAddr string, numPreviousAtte } } -func (c *TunnelConfig) SupportedFeatures() []string { - supported := []string{features.FeatureSerializedHeaders} - if 
c.NamedTunnel == nil { - supported = append(supported, features.FeatureQuickReconnects) - } - return supported -} - func StartTunnelDaemon( ctx context.Context, config *TunnelConfig, @@ -181,6 +175,8 @@ func (f *ipAddrFallback) ShouldGetNewAddress(connIndex uint8, err error) (needsN type EdgeTunnelServer struct { config *TunnelConfig orchestrator *orchestration.Orchestrator + sessionManager v3.SessionManager + datagramMetrics v3.Metrics edgeAddrHandler EdgeAddrHandler edgeAddrs *edgediscovery.Edge edgeBindAddr net.IP @@ -199,7 +195,7 @@ func (e *EdgeTunnelServer) Serve(ctx context.Context, connIndex uint8, protocolF haConnections.Inc() defer haConnections.Dec() - connectedFuse := h2mux.NewBooleanFuse() + connectedFuse := newBooleanFuse() go func() { if connectedFuse.Await() { connectedSignal.Notify() @@ -375,7 +371,7 @@ func (e *EdgeTunnelServer) serveTunnel( connLog *ConnAwareLogger, addr *allregions.EdgeAddr, connIndex uint8, - fuse *h2mux.BooleanFuse, + fuse *booleanFuse, backoff *protocolFallback, protocol connection.Protocol, ) (err error, recoverable bool) { @@ -441,7 +437,7 @@ func (e *EdgeTunnelServer) serveConnection( connLog *ConnAwareLogger, addr *allregions.EdgeAddr, connIndex uint8, - fuse *h2mux.BooleanFuse, + fuse *booleanFuse, backoff *protocolFallback, protocol connection.Protocol, ) (err error, recoverable bool) { @@ -466,7 +462,7 @@ func (e *EdgeTunnelServer) serveConnection( case connection.QUIC: connOptions := e.config.connectionOptions(addr.UDP.String(), uint8(backoff.Retries())) return e.serveQUIC(ctx, - addr.UDP, + addr.UDP.AddrPort(), connLog, connOptions, controlStream, @@ -549,7 +545,7 @@ func (e *EdgeTunnelServer) serveHTTP2( func (e *EdgeTunnelServer) serveQUIC( ctx context.Context, - edgeAddr *net.UDPAddr, + edgeAddr netip.AddrPort, connLogger *ConnAwareLogger, connOptions *pogs.ConnectionOptions, controlStreamHandler connection.ControlStreamHandler, @@ -572,7 +568,7 @@ func (e *EdgeTunnelServer) serveQUIC( // quic-go 0.44 increases 
the initial packet size to 1280 by default. That breaks anyone running tunnel through WARP // because WARP MTU is 1280. var initialPacketSize uint16 = 1252 - if edgeAddr.IP.To4() == nil { + if edgeAddr.Addr().Is4() { initialPacketSize = 1232 } @@ -590,31 +586,69 @@ func (e *EdgeTunnelServer) serveQUIC( InitialPacketSize: initialPacketSize, } - quicConn, err := connection.NewQUICConnection( + // Dial the QUIC connection to the edge + conn, err := connection.DialQuic( ctx, quicConfig, + tlsConfig, edgeAddr, e.edgeBindAddr, connIndex, - tlsConfig, - e.orchestrator, - connOptions, - controlStreamHandler, connLogger.Logger(), - e.config.PacketConfig, - e.config.RPCTimeout, - e.config.WriteStreamTimeout, ) if err != nil { - connLogger.ConnAwareLogger().Err(err).Msgf("Failed to create new quic connection") + connLogger.ConnAwareLogger().Err(err).Msgf("Failed to dial a quic connection") return err, true } + var datagramSessionManager connection.DatagramSessionHandler + if slices.Contains(connOptions.Client.Features, features.FeatureDatagramV3) { + datagramSessionManager = connection.NewDatagramV3Connection( + ctx, + conn, + e.sessionManager, + e.config.ICMPRouterServer, + connIndex, + e.datagramMetrics, + connLogger.Logger(), + ) + } else { + datagramSessionManager = connection.NewDatagramV2Connection( + ctx, + conn, + e.config.ICMPRouterServer, + connIndex, + e.config.RPCTimeout, + e.config.WriteStreamTimeout, + connLogger.Logger(), + ) + } + + // Wrap the [quic.Connection] as a TunnelConnection + tunnelConn, err := connection.NewTunnelConnection( + ctx, + conn, + connIndex, + e.orchestrator, + datagramSessionManager, + controlStreamHandler, + connOptions, + e.config.RPCTimeout, + e.config.WriteStreamTimeout, + e.config.GracePeriod, + connLogger.Logger(), + ) + if err != nil { + connLogger.ConnAwareLogger().Err(err).Msgf("Failed to create new tunnel connection") + return err, true + } + + // Serve the TunnelConnection errGroup, serveCtx := errgroup.WithContext(ctx) 
errGroup.Go(func() error { - err := quicConn.Serve(serveCtx) + err := tunnelConn.Serve(serveCtx) if err != nil { - connLogger.ConnAwareLogger().Err(err).Msg("Failed to serve quic connection") + connLogger.ConnAwareLogger().Err(err).Msg("Failed to serve tunnel connection") } return err }) @@ -623,8 +657,8 @@ func (e *EdgeTunnelServer) serveQUIC( err := listenReconnect(serveCtx, e.reconnectCh, e.gracefulShutdownC) if err != nil { // forcefully break the connection (this is only used for testing) - // errgroup will return context canceled for the quicConn.Serve - connLogger.Logger().Debug().Msg("Forcefully breaking quic connection") + // errgroup will return context canceled for the tunnelConn.Serve + connLogger.Logger().Debug().Msg("Forcefully breaking tunnel connection") } return err }) @@ -644,7 +678,7 @@ func listenReconnect(ctx context.Context, reconnectCh <-chan ReconnectSignal, gr } type connectedFuse struct { - fuse *h2mux.BooleanFuse + fuse *booleanFuse backoff *protocolFallback } diff --git a/tracing/tracing_test.go b/tracing/tracing_test.go index 5c478ed6..826e4f12 100644 --- a/tracing/tracing_test.go +++ b/tracing/tracing_test.go @@ -72,3 +72,9 @@ func TestAddingSpansWithNilMap(t *testing.T) { // a panic shouldn't occur tr.AddSpans(nil) } + +func FuzzNewIdentity(f *testing.F) { + f.Fuzz(func(t *testing.T, trace string) { + _, _ = NewIdentity(trace) + }) +} diff --git a/tunnelrpc/registration_client.go b/tunnelrpc/registration_client.go index f41819f3..7d945f58 100644 --- a/tunnelrpc/registration_client.go +++ b/tunnelrpc/registration_client.go @@ -23,7 +23,7 @@ type RegistrationClient interface { edgeAddress net.IP, ) (*pogs.ConnectionDetails, error) SendLocalConfiguration(ctx context.Context, config []byte) error - GracefulShutdown(ctx context.Context, gracePeriod time.Duration) + GracefulShutdown(ctx context.Context, gracePeriod time.Duration) error Close() } @@ -79,7 +79,7 @@ func (r *registrationClient) SendLocalConfiguration(ctx context.Context, 
config return err } -func (r *registrationClient) GracefulShutdown(ctx context.Context, gracePeriod time.Duration) { +func (r *registrationClient) GracefulShutdown(ctx context.Context, gracePeriod time.Duration) error { ctx, cancel := context.WithTimeout(ctx, gracePeriod) defer cancel() defer metrics.CapnpMetrics.ClientOperations.WithLabelValues(metrics.Registration, metrics.OperationUnregisterConnection).Inc() @@ -88,7 +88,9 @@ func (r *registrationClient) GracefulShutdown(ctx context.Context, gracePeriod t err := r.client.UnregisterConnection(ctx) if err != nil { metrics.CapnpMetrics.ClientFailures.WithLabelValues(metrics.Registration, metrics.OperationUnregisterConnection).Inc() + return err } + return nil } func (r *registrationClient) Close() { diff --git a/tunnelstate/conntracker.go b/tunnelstate/conntracker.go index 426ba483..d0119f10 100644 --- a/tunnelstate/conntracker.go +++ b/tunnelstate/conntracker.go @@ -1,6 +1,7 @@ package tunnelstate import ( + "net" "sync" "github.com/rs/zerolog" @@ -9,54 +10,58 @@ import ( ) type ConnTracker struct { - sync.RWMutex + mutex sync.RWMutex // int is the connection Index connectionInfo map[uint8]ConnectionInfo log *zerolog.Logger } type ConnectionInfo struct { - IsConnected bool - Protocol connection.Protocol + IsConnected bool `json:"isConnected,omitempty"` + Protocol connection.Protocol `json:"protocol,omitempty"` + EdgeAddress net.IP `json:"edgeAddress,omitempty"` } -func NewConnTracker(log *zerolog.Logger) *ConnTracker { +// Convinience struct to extend the connection with its index. 
+type IndexedConnectionInfo struct { + ConnectionInfo + Index uint8 `json:"index,omitempty"` +} + +func NewConnTracker( + log *zerolog.Logger, +) *ConnTracker { return &ConnTracker{ connectionInfo: make(map[uint8]ConnectionInfo, 0), log: log, } } -func MockedConnTracker(mocked map[uint8]ConnectionInfo) *ConnTracker { - return &ConnTracker{ - connectionInfo: mocked, - } -} - func (ct *ConnTracker) OnTunnelEvent(c connection.Event) { switch c.EventType { case connection.Connected: - ct.Lock() + ct.mutex.Lock() ci := ConnectionInfo{ IsConnected: true, Protocol: c.Protocol, + EdgeAddress: c.EdgeAddress, } ct.connectionInfo[c.Index] = ci - ct.Unlock() + ct.mutex.Unlock() case connection.Disconnected, connection.Reconnecting, connection.RegisteringTunnel, connection.Unregistering: - ct.Lock() + ct.mutex.Lock() ci := ct.connectionInfo[c.Index] ci.IsConnected = false ct.connectionInfo[c.Index] = ci - ct.Unlock() + ct.mutex.Unlock() default: ct.log.Error().Msgf("Unknown connection event case %v", c) } } func (ct *ConnTracker) CountActiveConns() uint { - ct.RLock() - defer ct.RUnlock() + ct.mutex.RLock() + defer ct.mutex.RUnlock() active := uint(0) for _, ci := range ct.connectionInfo { if ci.IsConnected { @@ -69,8 +74,8 @@ func (ct *ConnTracker) CountActiveConns() uint { // HasConnectedWith checks if we've ever had a successful connection to the edge // with said protocol. 
func (ct *ConnTracker) HasConnectedWith(protocol connection.Protocol) bool { - ct.RLock() - defer ct.RUnlock() + ct.mutex.RLock() + defer ct.mutex.RUnlock() for _, ci := range ct.connectionInfo { if ci.Protocol == protocol { return true @@ -78,3 +83,21 @@ func (ct *ConnTracker) HasConnectedWith(protocol connection.Protocol) bool { } return false } + +// Returns the connection information iff it is connected this +// also leverages the [IndexedConnectionInfo] to also provide the connection index +func (ct *ConnTracker) GetActiveConnections() []IndexedConnectionInfo { + ct.mutex.RLock() + defer ct.mutex.RUnlock() + + connections := make([]IndexedConnectionInfo, 0) + + for key, value := range ct.connectionInfo { + if value.IsConnected { + info := IndexedConnectionInfo{value, key} + connections = append(connections, info) + } + } + + return connections +} diff --git a/validation/validation_test.go b/validation/validation_test.go index 9f4a2ceb..3e4534cf 100644 --- a/validation/validation_test.go +++ b/validation/validation_test.go @@ -197,3 +197,10 @@ func createSecureMockServerAndClient(handler http.Handler) (*httptest.Server, *h return server, client, nil } + +func FuzzNewAccessValidator(f *testing.F) { + f.Fuzz(func(t *testing.T, domain string, issuer string, applicationAUD string) { + ctx := context.Background() + _, _ = NewAccessValidator(ctx, domain, issuer, applicationAUD) + }) +} diff --git a/vendor/github.com/coredns/coredns/core/dnsserver/config.go b/vendor/github.com/coredns/coredns/core/dnsserver/config.go index 3da86271..9e111665 100644 --- a/vendor/github.com/coredns/coredns/core/dnsserver/config.go +++ b/vendor/github.com/coredns/coredns/core/dnsserver/config.go @@ -5,6 +5,7 @@ import ( "crypto/tls" "fmt" "net/http" + "time" "github.com/coredns/caddy" "github.com/coredns/coredns/plugin" @@ -53,6 +54,11 @@ type Config struct { // TLSConfig when listening for encrypted connections (gRPC, DNS-over-TLS). 
TLSConfig *tls.Config + // Timeouts for TCP, TLS and HTTPS servers. + ReadTimeout time.Duration + WriteTimeout time.Duration + IdleTimeout time.Duration + // TSIG secrets, [name]key. TsigSecret map[string]string diff --git a/vendor/github.com/coredns/coredns/core/dnsserver/https.go b/vendor/github.com/coredns/coredns/core/dnsserver/https.go index 382e06ef..015c52ec 100644 --- a/vendor/github.com/coredns/coredns/core/dnsserver/https.go +++ b/vendor/github.com/coredns/coredns/core/dnsserver/https.go @@ -4,13 +4,11 @@ import ( "net" "net/http" - "github.com/coredns/coredns/plugin/pkg/nonwriter" + "github.com/miekg/dns" ) -// DoHWriter is a nonwriter.Writer that adds more specific LocalAddr and RemoteAddr methods. +// DoHWriter is a dns.ResponseWriter that adds more specific LocalAddr and RemoteAddr methods. type DoHWriter struct { - nonwriter.Writer - // raddr is the remote's address. This can be optionally set. raddr net.Addr // laddr is our address. This can be optionally set. @@ -18,13 +16,50 @@ type DoHWriter struct { // request is the HTTP request we're currently handling. request *http.Request + + // Msg is a response to be written to the client. + Msg *dns.Msg +} + +// WriteMsg stores the message to be written to the client. +func (d *DoHWriter) WriteMsg(m *dns.Msg) error { + d.Msg = m + return nil +} + +// Write stores the message to be written to the client. +func (d *DoHWriter) Write(b []byte) (int, error) { + d.Msg = new(dns.Msg) + return len(b), d.Msg.Unpack(b) } // RemoteAddr returns the remote address. -func (d *DoHWriter) RemoteAddr() net.Addr { return d.raddr } +func (d *DoHWriter) RemoteAddr() net.Addr { + return d.raddr +} // LocalAddr returns the local address. -func (d *DoHWriter) LocalAddr() net.Addr { return d.laddr } +func (d *DoHWriter) LocalAddr() net.Addr { + return d.laddr +} -// Request returns the HTTP request -func (d *DoHWriter) Request() *http.Request { return d.request } +// Request returns the HTTP request. 
+func (d *DoHWriter) Request() *http.Request { + return d.request +} + +// Close no-op implementation. +func (d *DoHWriter) Close() error { + return nil +} + +// TsigStatus no-op implementation. +func (d *DoHWriter) TsigStatus() error { + return nil +} + +// TsigTimersOnly no-op implementation. +func (d *DoHWriter) TsigTimersOnly(_ bool) {} + +// Hijack no-op implementation. +func (d *DoHWriter) Hijack() {} diff --git a/vendor/github.com/coredns/coredns/core/dnsserver/quic.go b/vendor/github.com/coredns/coredns/core/dnsserver/quic.go new file mode 100644 index 00000000..5c2890a7 --- /dev/null +++ b/vendor/github.com/coredns/coredns/core/dnsserver/quic.go @@ -0,0 +1,60 @@ +package dnsserver + +import ( + "encoding/binary" + "net" + + "github.com/miekg/dns" + "github.com/quic-go/quic-go" +) + +type DoQWriter struct { + localAddr net.Addr + remoteAddr net.Addr + stream quic.Stream + Msg *dns.Msg +} + +func (w *DoQWriter) Write(b []byte) (int, error) { + b = AddPrefix(b) + return w.stream.Write(b) +} + +func (w *DoQWriter) WriteMsg(m *dns.Msg) error { + bytes, err := m.Pack() + if err != nil { + return err + } + + _, err = w.Write(bytes) + if err != nil { + return err + } + + return w.Close() +} + +// Close sends the STREAM FIN signal. +// The server MUST send the response(s) on the same stream and MUST +// indicate, after the last response, through the STREAM FIN +// mechanism that no further data will be sent on that stream. +// See https://www.rfc-editor.org/rfc/rfc9250#section-4.2-7 +func (w *DoQWriter) Close() error { + return w.stream.Close() +} + +// AddPrefix adds a 2-byte prefix with the DNS message length. +func AddPrefix(b []byte) (m []byte) { + m = make([]byte, 2+len(b)) + binary.BigEndian.PutUint16(m, uint16(len(b))) + copy(m[2:], b) + + return m +} + +// These methods implement the dns.ResponseWriter interface from Go DNS. 
+func (w *DoQWriter) TsigStatus() error { return nil } +func (w *DoQWriter) TsigTimersOnly(b bool) {} +func (w *DoQWriter) Hijack() {} +func (w *DoQWriter) LocalAddr() net.Addr { return w.localAddr } +func (w *DoQWriter) RemoteAddr() net.Addr { return w.remoteAddr } diff --git a/vendor/github.com/coredns/coredns/core/dnsserver/register.go b/vendor/github.com/coredns/coredns/core/dnsserver/register.go index e94accc2..ae001b9f 100644 --- a/vendor/github.com/coredns/coredns/core/dnsserver/register.go +++ b/vendor/github.com/coredns/coredns/core/dnsserver/register.go @@ -1,7 +1,6 @@ package dnsserver import ( - "flag" "fmt" "net" "time" @@ -17,12 +16,7 @@ import ( const serverType = "dns" -// Any flags defined here, need to be namespaced to the serverType other -// wise they potentially clash with other server types. func init() { - flag.StringVar(&Port, serverType+".port", DefaultPort, "Default port") - flag.StringVar(&Port, "p", DefaultPort, "Default port") - caddy.RegisterServerType(serverType, caddy.ServerType{ Directives: func() []string { return Directives }, DefaultInput: func() caddy.Input { @@ -88,6 +82,8 @@ func (h *dnsContext) InspectServerBlocks(sourceFile string, serverBlocks []caddy port = Port case transport.TLS: port = transport.TLSPort + case transport.QUIC: + port = transport.QUICPort case transport.GRPC: port = transport.GRPCPort case transport.HTTPS: @@ -147,7 +143,12 @@ func (h *dnsContext) MakeServers() ([]caddy.Server, error) { c.ListenHosts = c.firstConfigInBlock.ListenHosts c.Debug = c.firstConfigInBlock.Debug c.Stacktrace = c.firstConfigInBlock.Stacktrace - c.TLSConfig = c.firstConfigInBlock.TLSConfig + + // Fork TLSConfig for each encrypted connection + c.TLSConfig = c.firstConfigInBlock.TLSConfig.Clone() + c.ReadTimeout = c.firstConfigInBlock.ReadTimeout + c.WriteTimeout = c.firstConfigInBlock.WriteTimeout + c.IdleTimeout = c.firstConfigInBlock.IdleTimeout c.TsigSecret = c.firstConfigInBlock.TsigSecret } @@ -175,6 +176,13 @@ func (h 
*dnsContext) MakeServers() ([]caddy.Server, error) { } servers = append(servers, s) + case transport.QUIC: + s, err := NewServerQUIC(addr, group) + if err != nil { + return nil, err + } + servers = append(servers, s) + case transport.GRPC: s, err := NewServergRPC(addr, group) if err != nil { @@ -221,7 +229,8 @@ func (c *Config) AddPlugin(m plugin.Plugin) { } // registerHandler adds a handler to a site's handler registration. Handlers -// use this to announce that they exist to other plugin. +// +// use this to announce that they exist to other plugin. func (c *Config) registerHandler(h plugin.Handler) { if c.registry == nil { c.registry = make(map[string]plugin.Handler) @@ -287,7 +296,7 @@ func (h *dnsContext) validateZonesAndListeningAddresses() error { return nil } -// groupSiteConfigsByListenAddr groups site configs by their listen +// groupConfigsByListenAddr groups site configs by their listen // (bind) address, so sites that use the same listener can be served // on the same server instance. The return value maps the listen // address (what you pass into net.Listen) to the list of site configs. 
diff --git a/vendor/github.com/coredns/coredns/core/dnsserver/server.go b/vendor/github.com/coredns/coredns/core/dnsserver/server.go index 478287bf..2107e8d0 100644 --- a/vendor/github.com/coredns/coredns/core/dnsserver/server.go +++ b/vendor/github.com/coredns/coredns/core/dnsserver/server.go @@ -44,6 +44,9 @@ type Server struct { debug bool // disable recover() stacktrace bool // enable stacktrace in recover error log classChaos bool // allow non-INET class queries + idleTimeout time.Duration // Idle timeout for TCP + readTimeout time.Duration // Read timeout for TCP + writeTimeout time.Duration // Write timeout for TCP tsigSecret map[string]string } @@ -60,6 +63,9 @@ func NewServer(addr string, group []*Config) (*Server, error) { Addr: addr, zones: make(map[string][]*Config), graceTimeout: 5 * time.Second, + idleTimeout: 10 * time.Second, + readTimeout: 3 * time.Second, + writeTimeout: 5 * time.Second, tsigSecret: make(map[string]string), } @@ -81,6 +87,17 @@ func NewServer(addr string, group []*Config) (*Server, error) { // append the config to the zone's configs s.zones[site.Zone] = append(s.zones[site.Zone], site) + // set timeouts + if site.ReadTimeout != 0 { + s.readTimeout = site.ReadTimeout + } + if site.WriteTimeout != 0 { + s.writeTimeout = site.WriteTimeout + } + if site.IdleTimeout != 0 { + s.idleTimeout = site.IdleTimeout + } + // copy tsig secrets for key, secret := range site.TsigSecret { s.tsigSecret[key] = secret @@ -130,11 +147,22 @@ var _ caddy.GracefulServer = &Server{} // This implements caddy.TCPServer interface. 
func (s *Server) Serve(l net.Listener) error { s.m.Lock() - s.server[tcp] = &dns.Server{Listener: l, Net: "tcp", Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { - ctx := context.WithValue(context.Background(), Key{}, s) - ctx = context.WithValue(ctx, LoopKey{}, 0) - s.ServeDNS(ctx, w, r) - }), TsigSecret: s.tsigSecret} + + s.server[tcp] = &dns.Server{Listener: l, + Net: "tcp", + TsigSecret: s.tsigSecret, + MaxTCPQueries: tcpMaxQueries, + ReadTimeout: s.readTimeout, + WriteTimeout: s.writeTimeout, + IdleTimeout: func() time.Duration { + return s.idleTimeout + }, + Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + ctx := context.WithValue(context.Background(), Key{}, s) + ctx = context.WithValue(ctx, LoopKey{}, 0) + s.ServeDNS(ctx, w, r) + })} + s.m.Unlock() return s.server[tcp].ActivateAndServe() @@ -404,6 +432,8 @@ func errorAndMetricsFunc(server string, w dns.ResponseWriter, r *dns.Msg, rc int const ( tcp = 0 udp = 1 + + tcpMaxQueries = -1 ) type ( diff --git a/vendor/github.com/coredns/coredns/core/dnsserver/server_https.go b/vendor/github.com/coredns/coredns/core/dnsserver/server_https.go index eda39c14..cddf5989 100644 --- a/vendor/github.com/coredns/coredns/core/dnsserver/server_https.go +++ b/vendor/github.com/coredns/coredns/core/dnsserver/server_https.go @@ -75,9 +75,9 @@ func NewServerHTTPS(addr string, group []*Config) (*ServerHTTPS, error) { } srv := &http.Server{ - ReadTimeout: 5 * time.Second, - WriteTimeout: 10 * time.Second, - IdleTimeout: 120 * time.Second, + ReadTimeout: s.readTimeout, + WriteTimeout: s.writeTimeout, + IdleTimeout: s.idleTimeout, ErrorLog: stdlog.New(&loggerAdapter{}, "", 0), } sh := &ServerHTTPS{ diff --git a/vendor/github.com/coredns/coredns/core/dnsserver/server_quic.go b/vendor/github.com/coredns/coredns/core/dnsserver/server_quic.go new file mode 100644 index 00000000..ba7867cf --- /dev/null +++ b/vendor/github.com/coredns/coredns/core/dnsserver/server_quic.go @@ -0,0 +1,346 @@ +package 
dnsserver + +import ( + "context" + "crypto/tls" + "encoding/binary" + "errors" + "fmt" + "io" + "math" + "net" + + "github.com/coredns/coredns/plugin/metrics/vars" + clog "github.com/coredns/coredns/plugin/pkg/log" + "github.com/coredns/coredns/plugin/pkg/reuseport" + "github.com/coredns/coredns/plugin/pkg/transport" + + "github.com/miekg/dns" + "github.com/quic-go/quic-go" +) + +const ( + // DoQCodeNoError is used when the connection or stream needs to be + // closed, but there is no error to signal. + DoQCodeNoError quic.ApplicationErrorCode = 0 + + // DoQCodeInternalError signals that the DoQ implementation encountered + // an internal error and is incapable of pursuing the transaction or the + // connection. + DoQCodeInternalError quic.ApplicationErrorCode = 1 + + // DoQCodeProtocolError signals that the DoQ implementation encountered + // a protocol error and is forcibly aborting the connection. + DoQCodeProtocolError quic.ApplicationErrorCode = 2 +) + +// ServerQUIC represents an instance of a DNS-over-QUIC server. +type ServerQUIC struct { + *Server + listenAddr net.Addr + tlsConfig *tls.Config + quicConfig *quic.Config + quicListener *quic.Listener +} + +// NewServerQUIC returns a new CoreDNS QUIC server and compiles all plugin in to it. +func NewServerQUIC(addr string, group []*Config) (*ServerQUIC, error) { + s, err := NewServer(addr, group) + if err != nil { + return nil, err + } + // The *tls* plugin must make sure that multiple conflicting + // TLS configuration returns an error: it can only be specified once. + var tlsConfig *tls.Config + for _, z := range s.zones { + for _, conf := range z { + // Should we error if some configs *don't* have TLS? 
+ tlsConfig = conf.TLSConfig + } + } + + if tlsConfig != nil { + tlsConfig.NextProtos = []string{"doq"} + } + + var quicConfig *quic.Config + quicConfig = &quic.Config{ + MaxIdleTimeout: s.idleTimeout, + MaxIncomingStreams: math.MaxUint16, + MaxIncomingUniStreams: math.MaxUint16, + // Enable 0-RTT by default for all connections on the server-side. + Allow0RTT: true, + } + + return &ServerQUIC{Server: s, tlsConfig: tlsConfig, quicConfig: quicConfig}, nil +} + +// ServePacket implements caddy.UDPServer interface. +func (s *ServerQUIC) ServePacket(p net.PacketConn) error { + s.m.Lock() + s.listenAddr = s.quicListener.Addr() + s.m.Unlock() + + return s.ServeQUIC() +} + +// ServeQUIC listens for incoming QUIC packets. +func (s *ServerQUIC) ServeQUIC() error { + for { + conn, err := s.quicListener.Accept(context.Background()) + if err != nil { + if s.isExpectedErr(err) { + s.closeQUICConn(conn, DoQCodeNoError) + return err + } + + s.closeQUICConn(conn, DoQCodeInternalError) + return err + } + + go s.serveQUICConnection(conn) + } +} + +// serveQUICConnection handles a new QUIC connection. It waits for new streams +// and passes them to serveQUICStream. +func (s *ServerQUIC) serveQUICConnection(conn quic.Connection) { + for { + // In DoQ, one query consumes one stream. + // The client MUST select the next available client-initiated bidirectional + // stream for each subsequent query on a QUIC connection. + stream, err := conn.AcceptStream(context.Background()) + if err != nil { + if s.isExpectedErr(err) { + s.closeQUICConn(conn, DoQCodeNoError) + return + } + + s.closeQUICConn(conn, DoQCodeInternalError) + return + } + + go s.serveQUICStream(stream, conn) + } +} + +func (s *ServerQUIC) serveQUICStream(stream quic.Stream, conn quic.Connection) { + buf, err := readDOQMessage(stream) + + // io.EOF does not really mean that there's any error, it is just + // the STREAM FIN indicating that there will be no data to read + // anymore from this stream. 
+ if err != nil && err != io.EOF { + s.closeQUICConn(conn, DoQCodeProtocolError) + + return + } + + req := &dns.Msg{} + err = req.Unpack(buf) + if err != nil { + clog.Debugf("unpacking quic packet: %s", err) + s.closeQUICConn(conn, DoQCodeProtocolError) + + return + } + + if !validRequest(req) { + // If a peer encounters such an error condition, it is considered a + // fatal error. It SHOULD forcibly abort the connection using QUIC's + // CONNECTION_CLOSE mechanism and SHOULD use the DoQ error code + // DOQ_PROTOCOL_ERROR. + // See https://www.rfc-editor.org/rfc/rfc9250#section-4.3.3-3 + s.closeQUICConn(conn, DoQCodeProtocolError) + + return + } + + w := &DoQWriter{ + localAddr: conn.LocalAddr(), + remoteAddr: conn.RemoteAddr(), + stream: stream, + Msg: req, + } + + dnsCtx := context.WithValue(stream.Context(), Key{}, s.Server) + dnsCtx = context.WithValue(dnsCtx, LoopKey{}, 0) + s.ServeDNS(dnsCtx, w, req) + s.countResponse(DoQCodeNoError) +} + +// ListenPacket implements caddy.UDPServer interface. +func (s *ServerQUIC) ListenPacket() (net.PacketConn, error) { + p, err := reuseport.ListenPacket("udp", s.Addr[len(transport.QUIC+"://"):]) + if err != nil { + return nil, err + } + + s.m.Lock() + defer s.m.Unlock() + + s.quicListener, err = quic.Listen(p, s.tlsConfig, s.quicConfig) + if err != nil { + return nil, err + } + + return p, nil +} + +// OnStartupComplete lists the sites served by this server +// and any relevant information, assuming Quiet is false. +func (s *ServerQUIC) OnStartupComplete() { + if Quiet { + return + } + + out := startUpZones(transport.QUIC+"://", s.Addr, s.zones) + if out != "" { + fmt.Print(out) + } +} + +// Stop stops the server non-gracefully. It blocks until the server is totally stopped. +func (s *ServerQUIC) Stop() error { + s.m.Lock() + defer s.m.Unlock() + + if s.quicListener != nil { + return s.quicListener.Close() + } + + return nil +} + +// Serve implements caddy.TCPServer interface. 
+func (s *ServerQUIC) Serve(l net.Listener) error { return nil } + +// Listen implements caddy.TCPServer interface. +func (s *ServerQUIC) Listen() (net.Listener, error) { return nil, nil } + +// closeQUICConn quietly closes the QUIC connection. +func (s *ServerQUIC) closeQUICConn(conn quic.Connection, code quic.ApplicationErrorCode) { + if conn == nil { + return + } + + clog.Debugf("closing quic conn %s with code %d", conn.LocalAddr(), code) + err := conn.CloseWithError(code, "") + if err != nil { + clog.Debugf("failed to close quic connection with code %d: %s", code, err) + } + + // DoQCodeNoError metrics are already registered after s.ServeDNS() + if code != DoQCodeNoError { + s.countResponse(code) + } +} + +// validRequest checks for protocol errors in the unpacked DNS message. +// See https://www.rfc-editor.org/rfc/rfc9250.html#name-protocol-errors +func validRequest(req *dns.Msg) (ok bool) { + // 1. a client or server receives a message with a non-zero Message ID. + if req.Id != 0 { + return false + } + + // 2. an implementation receives a message containing the edns-tcp-keepalive + // EDNS(0) Option [RFC7828]. + if opt := req.IsEdns0(); opt != nil { + for _, option := range opt.Option { + if option.Option() == dns.EDNS0TCPKEEPALIVE { + clog.Debug("client sent EDNS0 TCP keepalive option") + + return false + } + } + } + + // 3. the client or server does not indicate the expected STREAM FIN after + // sending requests or responses. + // + // This is quite problematic to validate this case since this would imply + // we have to wait until STREAM FIN is arrived before we start processing + // the message. So we're consciously ignoring this case in this + // implementation. + + // 4. a server receives a "replayable" transaction in 0-RTT data + // + // The information necessary to validate this is not exposed by quic-go. + + return true +} + +// readDOQMessage reads a DNS over QUIC (DOQ) message from the given stream +// and returns the message bytes. 
+// Drafts of the RFC9250 did not require the 2-byte prefixed message length. +// Thus, we are only supporting the official version (DoQ v1). +func readDOQMessage(r io.Reader) ([]byte, error) { + // All DNS messages (queries and responses) sent over DoQ connections MUST + // be encoded as a 2-octet length field followed by the message content as + // specified in [RFC1035]. + // See https://www.rfc-editor.org/rfc/rfc9250.html#section-4.2-4 + sizeBuf := make([]byte, 2) + _, err := io.ReadFull(r, sizeBuf) + if err != nil { + return nil, err + } + + size := binary.BigEndian.Uint16(sizeBuf) + + if size == 0 { + return nil, fmt.Errorf("message size is 0: probably unsupported DoQ version") + } + + buf := make([]byte, size) + _, err = io.ReadFull(r, buf) + + // A client or server receives a STREAM FIN before receiving all the bytes + // for a message indicated in the 2-octet length field. + // See https://www.rfc-editor.org/rfc/rfc9250#section-4.3.3-2.2 + if size != uint16(len(buf)) { + return nil, fmt.Errorf("message size does not match 2-byte prefix") + } + + return buf, err +} + +// isExpectedErr returns true if err is an expected error, likely related to +// the current implementation. +func (s *ServerQUIC) isExpectedErr(err error) bool { + if err == nil { + return false + } + + // This error is returned when the QUIC listener was closed by us. As + // graceful shutdown is not implemented, the connection will be abruptly + // closed but there is no error to signal. + if errors.Is(err, quic.ErrServerClosed) { + return true + } + + // This error happens when the connection was closed due to a DoQ + // protocol error but there's still something to read in the closed stream. + // For example, when the message was sent without the prefixed length. + var qAppErr *quic.ApplicationError + if errors.As(err, &qAppErr) && qAppErr.ErrorCode == 2 { + return true + } + + // When a connection hits the idle timeout, quic.AcceptStream() returns + // an IdleTimeoutError. 
In this, case, we should just drop the connection + // with DoQCodeNoError. + var qIdleErr *quic.IdleTimeoutError + return errors.As(err, &qIdleErr) +} + +func (s *ServerQUIC) countResponse(code quic.ApplicationErrorCode) { + switch code { + case DoQCodeNoError: + vars.QUICResponsesCount.WithLabelValues(s.Addr, "0x0").Inc() + case DoQCodeInternalError: + vars.QUICResponsesCount.WithLabelValues(s.Addr, "0x1").Inc() + case DoQCodeProtocolError: + vars.QUICResponsesCount.WithLabelValues(s.Addr, "0x2").Inc() + } +} diff --git a/vendor/github.com/coredns/coredns/core/dnsserver/server_tls.go b/vendor/github.com/coredns/coredns/core/dnsserver/server_tls.go index 6fff61d5..f2251efb 100644 --- a/vendor/github.com/coredns/coredns/core/dnsserver/server_tls.go +++ b/vendor/github.com/coredns/coredns/core/dnsserver/server_tls.go @@ -5,6 +5,7 @@ import ( "crypto/tls" "fmt" "net" + "time" "github.com/coredns/caddy" "github.com/coredns/coredns/plugin/pkg/reuseport" @@ -50,11 +51,20 @@ func (s *ServerTLS) Serve(l net.Listener) error { } // Only fill out the TCP server for this one. 
- s.server[tcp] = &dns.Server{Listener: l, Net: "tcp-tls", Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { - ctx := context.WithValue(context.Background(), Key{}, s.Server) - ctx = context.WithValue(ctx, LoopKey{}, 0) - s.ServeDNS(ctx, w, r) - })} + s.server[tcp] = &dns.Server{Listener: l, + Net: "tcp-tls", + MaxTCPQueries: tlsMaxQueries, + ReadTimeout: s.readTimeout, + WriteTimeout: s.writeTimeout, + IdleTimeout: func() time.Duration { + return s.idleTimeout + }, + Handler: dns.HandlerFunc(func(w dns.ResponseWriter, r *dns.Msg) { + ctx := context.WithValue(context.Background(), Key{}, s.Server) + ctx = context.WithValue(ctx, LoopKey{}, 0) + s.ServeDNS(ctx, w, r) + })} + s.m.Unlock() return s.server[tcp].ActivateAndServe() @@ -87,3 +97,7 @@ func (s *ServerTLS) OnStartupComplete() { fmt.Print(out) } } + +const ( + tlsMaxQueries = -1 +) diff --git a/vendor/github.com/coredns/coredns/core/dnsserver/zdirectives.go b/vendor/github.com/coredns/coredns/core/dnsserver/zdirectives.go index 38425fb0..83743ac2 100644 --- a/vendor/github.com/coredns/coredns/core/dnsserver/zdirectives.go +++ b/vendor/github.com/coredns/coredns/core/dnsserver/zdirectives.go @@ -10,14 +10,15 @@ package dnsserver // (after) them during a request, but they must not // care what plugin above them are doing. 
var Directives = []string{ + "root", "metadata", "geoip", "cancel", "tls", + "timeouts", "reload", "nsid", "bufsize", - "root", "bind", "debug", "trace", diff --git a/vendor/github.com/coredns/coredns/coremain/run.go b/vendor/github.com/coredns/coredns/coremain/run.go index fa765788..31b791c8 100644 --- a/vendor/github.com/coredns/coredns/coremain/run.go +++ b/vendor/github.com/coredns/coredns/coremain/run.go @@ -28,6 +28,9 @@ func init() { caddy.RegisterCaddyfileLoader("flag", caddy.LoaderFunc(confLoader)) caddy.SetDefaultCaddyfileLoader("default", caddy.LoaderFunc(defaultLoader)) + flag.StringVar(&dnsserver.Port, serverType+".port", dnsserver.DefaultPort, "Default port") + flag.StringVar(&dnsserver.Port, "p", dnsserver.DefaultPort, "Default port") + caddy.AppName = coreName caddy.AppVersion = CoreVersion } @@ -42,7 +45,7 @@ func Run() { } log.SetOutput(os.Stdout) - log.SetFlags(0) // Set to 0 because we're doing our own time, with timezone + log.SetFlags(LogFlags) if version { showVersion() @@ -166,10 +169,14 @@ var ( conf string version bool plugins bool + + // LogFlags are initially set to 0 for no extra output + LogFlags int ) // Build information obtained with the help of -ldflags var ( + // nolint appVersion = "(untracked dev build)" // inferred at startup devBuild = true // inferred at startup diff --git a/vendor/github.com/coredns/coredns/coremain/version.go b/vendor/github.com/coredns/coredns/coremain/version.go index 7578079c..232941fb 100644 --- a/vendor/github.com/coredns/coredns/coremain/version.go +++ b/vendor/github.com/coredns/coredns/coremain/version.go @@ -2,7 +2,7 @@ package coremain // Various CoreDNS constants. 
const ( - CoreVersion = "1.10.0" + CoreVersion = "1.11.3" coreName = "CoreDNS" serverType = "dns" ) diff --git a/vendor/github.com/coredns/coredns/plugin/cache/README.md b/vendor/github.com/coredns/coredns/plugin/cache/README.md index 562f5bd9..d516a91d 100644 --- a/vendor/github.com/coredns/coredns/plugin/cache/README.md +++ b/vendor/github.com/coredns/coredns/plugin/cache/README.md @@ -10,8 +10,7 @@ With *cache* enabled, all records except zone transfers and metadata records wil 3600s. Caching is mostly useful in a scenario when fetching data from the backend (upstream, database, etc.) is expensive. -*Cache* will change the query to enable DNSSEC (DNSSEC OK; DO) if it passes through the plugin. If -the client didn't request any DNSSEC (records), these are filtered out when replying. +*Cache* will pass DNSSEC (DNSSEC OK; DO) options through the plugin for upstream queries. This plugin can only be used once per Server Block. @@ -40,6 +39,7 @@ cache [TTL] [ZONES...] { serve_stale [DURATION] [REFRESH_MODE] servfail DURATION disable success|denial [ZONES...] + keepttl } ~~~ @@ -70,6 +70,11 @@ cache [TTL] [ZONES...] { greater than 5 minutes. * `disable` disable the success or denial cache for the listed **ZONES**. If no **ZONES** are given, the specified cache will be disabled for all zones. +* `keepttl` do not age TTL when serving responses from cache. The entry will still be removed from cache + when the TTL expires as normal, but until it expires responses will include the original TTL instead + of the remaining TTL. This can be useful if CoreDNS is used as an authoritative server and you want + to serve a consistent TTL to downstream clients. This is **NOT** recommended when CoreDNS is caching + records it is not authoritative for because it could result in downstream clients using stale answers. 
## Capacity and Eviction @@ -136,4 +141,4 @@ example.org { disable denial sub.example.org } } -~~~ \ No newline at end of file +~~~ diff --git a/vendor/github.com/coredns/coredns/plugin/cache/cache.go b/vendor/github.com/coredns/coredns/plugin/cache/cache.go index b4767937..1378263b 100644 --- a/vendor/github.com/coredns/coredns/plugin/cache/cache.go +++ b/vendor/github.com/coredns/coredns/plugin/cache/cache.go @@ -48,6 +48,9 @@ type Cache struct { pexcept []string nexcept []string + // Keep ttl option + keepttl bool + // Testing. now func() time.Time } @@ -76,7 +79,7 @@ func New() *Cache { // key returns key under which we store the item, -1 will be returned if we don't store the message. // Currently we do not cache Truncated, errors zone transfers or dynamic update messages. // qname holds the already lowercased qname. -func key(qname string, m *dns.Msg, t response.Type) (bool, uint64) { +func key(qname string, m *dns.Msg, t response.Type, do, cd bool) (bool, uint64) { // We don't store truncated responses. if m.Truncated { return false, 0 @@ -86,11 +89,27 @@ func key(qname string, m *dns.Msg, t response.Type) (bool, uint64) { return false, 0 } - return true, hash(qname, m.Question[0].Qtype) + return true, hash(qname, m.Question[0].Qtype, do, cd) } -func hash(qname string, qtype uint16) uint64 { +var one = []byte("1") +var zero = []byte("0") + +func hash(qname string, qtype uint16, do, cd bool) uint64 { h := fnv.New64() + + if do { + h.Write(one) + } else { + h.Write(zero) + } + + if cd { + h.Write(one) + } else { + h.Write(zero) + } + h.Write([]byte{byte(qtype >> 8)}) h.Write([]byte{byte(qtype)}) h.Write([]byte(qname)) @@ -116,6 +135,7 @@ type ResponseWriter struct { server string // Server handling the request. do bool // When true the original request had the DO bit set. + cd bool // When true the original request had the CD bit set. ad bool // When true the original request had the AD bit set. prefetch bool // When true write nothing back to the client. 
remoteAddr net.Addr @@ -145,6 +165,8 @@ func newPrefetchResponseWriter(server string, state request.Request, c *Cache) * Cache: c, state: state, server: server, + do: state.Do(), + cd: state.Req.CheckingDisabled, prefetch: true, remoteAddr: addr, } @@ -163,7 +185,7 @@ func (w *ResponseWriter) WriteMsg(res *dns.Msg) error { mt, _ := response.Typify(res, w.now().UTC()) // key returns empty string for anything we don't want to cache. - hasKey, key := key(w.state.Name(), res, mt) + hasKey, key := key(w.state.Name(), res, mt, w.do, w.cd) msgTTL := dnsutil.MinimalTTL(res, mt) var duration time.Duration @@ -191,11 +213,10 @@ func (w *ResponseWriter) WriteMsg(res *dns.Msg) error { } // Apply capped TTL to this reply to avoid jarring TTL experience 1799 -> 8 (e.g.) - // We also may need to filter out DNSSEC records, see toMsg() for similar code. ttl := uint32(duration.Seconds()) - res.Answer = filterRRSlice(res.Answer, ttl, w.do, false) - res.Ns = filterRRSlice(res.Ns, ttl, w.do, false) - res.Extra = filterRRSlice(res.Extra, ttl, w.do, false) + res.Answer = filterRRSlice(res.Answer, ttl, false) + res.Ns = filterRRSlice(res.Ns, ttl, false) + res.Extra = filterRRSlice(res.Extra, ttl, false) if !w.do && !w.ad { // unset AD bit if requester is not OK with DNSSEC diff --git a/vendor/github.com/coredns/coredns/plugin/cache/dnssec.go b/vendor/github.com/coredns/coredns/plugin/cache/dnssec.go index cf908037..ec5ff41c 100644 --- a/vendor/github.com/coredns/coredns/plugin/cache/dnssec.go +++ b/vendor/github.com/coredns/coredns/plugin/cache/dnssec.go @@ -2,35 +2,13 @@ package cache import "github.com/miekg/dns" -// isDNSSEC returns true if r is a DNSSEC record. NSEC,NSEC3,DS and RRSIG/SIG -// are DNSSEC records. DNSKEYs is not in this list on the assumption that the -// client explicitly asked for it. 
-func isDNSSEC(r dns.RR) bool { - switch r.Header().Rrtype { - case dns.TypeNSEC: - return true - case dns.TypeNSEC3: - return true - case dns.TypeDS: - return true - case dns.TypeRRSIG: - return true - case dns.TypeSIG: - return true - } - return false -} - -// filterRRSlice filters rrs and removes DNSSEC RRs when do is false. In the returned slice -// the TTLs are set to ttl. If dup is true the RRs in rrs are _copied_ into the slice that is +// filterRRSlice filters out OPT RRs, and sets all RR TTLs to ttl. +// If dup is true the RRs in rrs are _copied_ into the slice that is // returned. -func filterRRSlice(rrs []dns.RR, ttl uint32, do, dup bool) []dns.RR { +func filterRRSlice(rrs []dns.RR, ttl uint32, dup bool) []dns.RR { j := 0 rs := make([]dns.RR, len(rrs)) for _, r := range rrs { - if !do && isDNSSEC(r) { - continue - } if r.Header().Rrtype == dns.TypeOPT { continue } diff --git a/vendor/github.com/coredns/coredns/plugin/cache/handler.go b/vendor/github.com/coredns/coredns/plugin/cache/handler.go index ec2135e8..38a8bfeb 100644 --- a/vendor/github.com/coredns/coredns/plugin/cache/handler.go +++ b/vendor/github.com/coredns/coredns/plugin/cache/handler.go @@ -18,6 +18,7 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) rc := r.Copy() // We potentially modify r, to prevent other plugins from seeing this (r is a pointer), copy r into rc. state := request.Request{W: w, Req: rc} do := state.Do() + cd := r.CheckingDisabled ad := r.AuthenticatedData zone := plugin.Zones(c.Zones).Matches(state.Name()) @@ -28,17 +29,15 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) now := c.now().UTC() server := metrics.WithServer(ctx) - // On cache miss, if the request has the OPT record and the DO bit set we leave the message as-is. If there isn't a DO bit - // set we will modify the request to _add_ one. This means we will always do DNSSEC lookups on cache misses. 
- // When writing to cache, any DNSSEC RRs in the response are written to cache with the response. - // When sending a response to a non-DNSSEC client, we remove DNSSEC RRs from the response. We use a 2048 buffer size, which is - // less than 4096 (and older default) and more than 1024 which may be too small. We might need to tweaks this - // value to be smaller still to prevent UDP fragmentation? + // On cache refresh, we will just use the DO bit from the incoming query for the refresh since we key our cache + // with the query DO bit. That means two separate cache items for the query DO bit true or false. In the situation + // in which upstream doesn't support DNSSEC, the two cache items will effectively be the same. Regardless, any + // DNSSEC RRs in the response are written to cache with the response. ttl := 0 i := c.getIgnoreTTL(now, state, server) if i == nil { - crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server, do: do, ad: ad, + crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server, do: do, ad: ad, cd: cd, nexcept: c.nexcept, pexcept: c.pexcept, wildcardFunc: wildcardFunc(ctx)} return c.doRefresh(ctx, state, crr) } @@ -46,7 +45,7 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) if ttl < 0 { // serve stale behavior if c.verifyStale { - crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server, do: do} + crr := &ResponseWriter{ResponseWriter: w, Cache: c, state: state, server: server, do: do, cd: cd} cw := newVerifyStaleResponseWriter(crr) ret, err := c.doRefresh(ctx, state, cw) if cw.refreshed { @@ -73,6 +72,11 @@ func (c *Cache) ServeDNS(ctx context.Context, w dns.ResponseWriter, r *dns.Msg) }) } + if c.keepttl { + // If keepttl is enabled we fake the current time to the stored + // one so that we always get the original TTL + now = i.stored + } resp := i.toMsg(r, now, do, ad) w.WriteMsg(resp) return dns.RcodeSuccess, nil @@ -101,9 +105,6 @@ 
func (c *Cache) doPrefetch(ctx context.Context, state request.Request, cw *Respo } func (c *Cache) doRefresh(ctx context.Context, state request.Request, cw dns.ResponseWriter) (int, error) { - if !state.Do() { - setDo(state.Req) - } return plugin.NextOrFailure(c.Name(), c.Next, ctx, cw, state.Req) } @@ -121,7 +122,7 @@ func (c *Cache) Name() string { return "cache" } // getIgnoreTTL unconditionally returns an item if it exists in the cache. func (c *Cache) getIgnoreTTL(now time.Time, state request.Request, server string) *item { - k := hash(state.Name(), state.QType()) + k := hash(state.Name(), state.QType(), state.Do(), state.Req.CheckingDisabled) cacheRequests.WithLabelValues(server, c.zonesMetricLabel, c.viewMetricLabel).Inc() if i, ok := c.ncache.Get(k); ok { @@ -145,7 +146,7 @@ func (c *Cache) getIgnoreTTL(now time.Time, state request.Request, server string } func (c *Cache) exists(state request.Request) *item { - k := hash(state.Name(), state.QType()) + k := hash(state.Name(), state.QType(), state.Do(), state.Req.CheckingDisabled) if i, ok := c.ncache.Get(k); ok { return i.(*item) } @@ -154,22 +155,3 @@ func (c *Cache) exists(state request.Request) *item { } return nil } - -// setDo sets the DO bit and UDP buffer size in the message m. -func setDo(m *dns.Msg) { - o := m.IsEdns0() - if o != nil { - o.SetDo() - o.SetUDPSize(defaultUDPBufSize) - return - } - - o = &dns.OPT{Hdr: dns.RR_Header{Name: ".", Rrtype: dns.TypeOPT}} - o.SetDo() - o.SetUDPSize(defaultUDPBufSize) - m.Extra = append(m.Extra, o) -} - -// defaultUDPBufsize is the bufsize the cache plugin uses on outgoing requests that don't -// have an OPT RR. 
-const defaultUDPBufSize = 2048 diff --git a/vendor/github.com/coredns/coredns/plugin/cache/item.go b/vendor/github.com/coredns/coredns/plugin/cache/item.go index 6b51a5ba..c5aeccdc 100644 --- a/vendor/github.com/coredns/coredns/plugin/cache/item.go +++ b/vendor/github.com/coredns/coredns/plugin/cache/item.go @@ -87,9 +87,9 @@ func (i *item) toMsg(m *dns.Msg, now time.Time, do bool, ad bool) *dns.Msg { m1.Extra = make([]dns.RR, len(i.Extra)) ttl := uint32(i.ttl(now)) - m1.Answer = filterRRSlice(i.Answer, ttl, do, true) - m1.Ns = filterRRSlice(i.Ns, ttl, do, true) - m1.Extra = filterRRSlice(i.Extra, ttl, do, true) + m1.Answer = filterRRSlice(i.Answer, ttl, true) + m1.Ns = filterRRSlice(i.Ns, ttl, true) + m1.Extra = filterRRSlice(i.Extra, ttl, true) return m1 } diff --git a/vendor/github.com/coredns/coredns/plugin/cache/setup.go b/vendor/github.com/coredns/coredns/plugin/cache/setup.go index 6a537d98..f8278b87 100644 --- a/vendor/github.com/coredns/coredns/plugin/cache/setup.go +++ b/vendor/github.com/coredns/coredns/plugin/cache/setup.go @@ -240,6 +240,12 @@ func cacheParse(c *caddy.Controller) (*Cache, error) { default: return nil, fmt.Errorf("cache type for disable must be %q or %q", Success, Denial) } + case "keepttl": + args := c.RemainingArgs() + if len(args) != 0 { + return nil, c.ArgErr() + } + ca.keepttl = true default: return nil, c.ArgErr() } diff --git a/vendor/github.com/coredns/coredns/plugin/metadata/provider.go b/vendor/github.com/coredns/coredns/plugin/metadata/provider.go index e1bd7059..2e88d58e 100644 --- a/vendor/github.com/coredns/coredns/plugin/metadata/provider.go +++ b/vendor/github.com/coredns/coredns/plugin/metadata/provider.go @@ -8,33 +8,32 @@ // // Implement the Provider interface for a plugin p: // -// func (p P) Metadata(ctx context.Context, state request.Request) context.Context { -// metadata.SetValueFunc(ctx, "test/something", func() string { return "myvalue" }) -// return ctx -// } +// func (p P) Metadata(ctx context.Context, state 
request.Request) context.Context { +// metadata.SetValueFunc(ctx, "test/something", func() string { return "myvalue" }) +// return ctx +// } // // Basic example with caching: // -// func (p P) Metadata(ctx context.Context, state request.Request) context.Context { -// cached := "" -// f := func() string { -// if cached != "" { -// return cached -// } -// cached = expensiveFunc() -// return cached -// } -// metadata.SetValueFunc(ctx, "test/something", f) -// return ctx -// } +// func (p P) Metadata(ctx context.Context, state request.Request) context.Context { +// cached := "" +// f := func() string { +// if cached != "" { +// return cached +// } +// cached = expensiveFunc() +// return cached +// } +// metadata.SetValueFunc(ctx, "test/something", f) +// return ctx +// } // // If you need access to this metadata from another plugin: // -// // ... -// valueFunc := metadata.ValueFunc(ctx, "test/something") -// value := valueFunc() -// // use 'value' -// +// // ... +// valueFunc := metadata.ValueFunc(ctx, "test/something") +// value := valueFunc() +// // use 'value' package metadata import ( diff --git a/vendor/github.com/coredns/coredns/plugin/metrics/README.md b/vendor/github.com/coredns/coredns/plugin/metrics/README.md index ec5da10d..144a5d1c 100644 --- a/vendor/github.com/coredns/coredns/plugin/metrics/README.md +++ b/vendor/github.com/coredns/coredns/plugin/metrics/README.md @@ -21,6 +21,7 @@ the following metrics are exported: * `coredns_dns_response_size_bytes{server, zone, view, proto}` - response size in bytes. * `coredns_dns_responses_total{server, zone, view, rcode, plugin}` - response per zone, rcode and plugin. * `coredns_dns_https_responses_total{server, status}` - responses per server and http status code. +* `coredns_dns_quic_responses_total{server, status}` - responses per server and QUIC application code. * `coredns_plugin_enabled{server, zone, view, name}` - indicates whether a plugin is enabled on per server, zone and view basis. 
Almost each counter has a label `zone` which is the zonename used for the request/response. diff --git a/vendor/github.com/coredns/coredns/plugin/metrics/vars/vars.go b/vendor/github.com/coredns/coredns/plugin/metrics/vars/vars.go index f0cf829c..7b807850 100644 --- a/vendor/github.com/coredns/coredns/plugin/metrics/vars/vars.go +++ b/vendor/github.com/coredns/coredns/plugin/metrics/vars/vars.go @@ -17,19 +17,21 @@ var ( }, []string{"server", "zone", "view", "proto", "family", "type"}) RequestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: plugin.Namespace, - Subsystem: subsystem, - Name: "request_duration_seconds", - Buckets: plugin.TimeBuckets, - Help: "Histogram of the time (in seconds) each request took per zone.", + Namespace: plugin.Namespace, + Subsystem: subsystem, + Name: "request_duration_seconds", + Buckets: plugin.TimeBuckets, + NativeHistogramBucketFactor: plugin.NativeHistogramBucketFactor, + Help: "Histogram of the time (in seconds) each request took per zone.", }, []string{"server", "zone", "view"}) RequestSize = promauto.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: plugin.Namespace, - Subsystem: subsystem, - Name: "request_size_bytes", - Help: "Size of the EDNS0 UDP buffer in bytes (64K for TCP) per zone and protocol.", - Buckets: []float64{0, 100, 200, 300, 400, 511, 1023, 2047, 4095, 8291, 16e3, 32e3, 48e3, 64e3}, + Namespace: plugin.Namespace, + Subsystem: subsystem, + Name: "request_size_bytes", + Help: "Size of the EDNS0 UDP buffer in bytes (64K for TCP) per zone and protocol.", + Buckets: []float64{0, 100, 200, 300, 400, 511, 1023, 2047, 4095, 8291, 16e3, 32e3, 48e3, 64e3}, + NativeHistogramBucketFactor: plugin.NativeHistogramBucketFactor, }, []string{"server", "zone", "view", "proto"}) RequestDo = promauto.NewCounterVec(prometheus.CounterOpts{ @@ -40,11 +42,12 @@ var ( }, []string{"server", "zone", "view"}) ResponseSize = promauto.NewHistogramVec(prometheus.HistogramOpts{ - Namespace: plugin.Namespace, - 
Subsystem: subsystem, - Name: "response_size_bytes", - Help: "Size of the returned response in bytes.", - Buckets: []float64{0, 100, 200, 300, 400, 511, 1023, 2047, 4095, 8291, 16e3, 32e3, 48e3, 64e3}, + Namespace: plugin.Namespace, + Subsystem: subsystem, + Name: "response_size_bytes", + Help: "Size of the returned response in bytes.", + Buckets: []float64{0, 100, 200, 300, 400, 511, 1023, 2047, 4095, 8291, 16e3, 32e3, 48e3, 64e3}, + NativeHistogramBucketFactor: plugin.NativeHistogramBucketFactor, }, []string{"server", "zone", "view", "proto"}) ResponseRcode = promauto.NewCounterVec(prometheus.CounterOpts{ @@ -72,6 +75,13 @@ var ( Name: "https_responses_total", Help: "Counter of DoH responses per server and http status code.", }, []string{"server", "status"}) + + QUICResponsesCount = promauto.NewCounterVec(prometheus.CounterOpts{ + Namespace: plugin.Namespace, + Subsystem: subsystem, + Name: "quic_responses_total", + Help: "Counter of DoQ responses per server and QUIC application code.", + }, []string{"server", "status"}) ) const ( diff --git a/vendor/github.com/coredns/coredns/plugin/pkg/dnsutil/ttl.go b/vendor/github.com/coredns/coredns/plugin/pkg/dnsutil/ttl.go index e2b26526..c7f423a7 100644 --- a/vendor/github.com/coredns/coredns/plugin/pkg/dnsutil/ttl.go +++ b/vendor/github.com/coredns/coredns/plugin/pkg/dnsutil/ttl.go @@ -48,5 +48,6 @@ const ( // MinimalDefaultTTL is the absolute lowest TTL we use in CoreDNS. MinimalDefaultTTL = 5 * time.Second // MaximumDefaulTTL is the maximum TTL was use on RRsets in CoreDNS. 
+ // TODO: rename as MaximumDefaultTTL MaximumDefaulTTL = 1 * time.Hour ) diff --git a/vendor/github.com/coredns/coredns/plugin/pkg/doh/doh.go b/vendor/github.com/coredns/coredns/plugin/pkg/doh/doh.go index 9d5305b3..faddfc8a 100644 --- a/vendor/github.com/coredns/coredns/plugin/pkg/doh/doh.go +++ b/vendor/github.com/coredns/coredns/plugin/pkg/doh/doh.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "net/http" + "strings" "github.com/miekg/dns" ) @@ -16,18 +17,30 @@ const MimeType = "application/dns-message" // Path is the URL path that should be used. const Path = "/dns-query" -// NewRequest returns a new DoH request given a method, URL (without any paths, so exclude /dns-query) and dns.Msg. +// NewRequest returns a new DoH request given a HTTP method, URL and dns.Msg. +// +// The URL should not have a path, so please exclude /dns-query. The URL will +// be prefixed with https:// by default, unless it's already prefixed with +// either http:// or https://. func NewRequest(method, url string, m *dns.Msg) (*http.Request, error) { buf, err := m.Pack() if err != nil { return nil, err } + if !strings.HasPrefix(url, "http://") && !strings.HasPrefix(url, "https://") { + url = fmt.Sprintf("https://%s", url) + } + switch method { case http.MethodGet: b64 := base64.RawURLEncoding.EncodeToString(buf) - req, err := http.NewRequest(http.MethodGet, "https://"+url+Path+"?dns="+b64, nil) + req, err := http.NewRequest( + http.MethodGet, + fmt.Sprintf("%s%s?dns=%s", url, Path, b64), + nil, + ) if err != nil { return req, err } @@ -37,7 +50,11 @@ func NewRequest(method, url string, m *dns.Msg) (*http.Request, error) { return req, nil case http.MethodPost: - req, err := http.NewRequest(http.MethodPost, "https://"+url+Path+"?bla=foo:443", bytes.NewReader(buf)) + req, err := http.NewRequest( + http.MethodPost, + fmt.Sprintf("%s%s?bla=foo:443", url, Path), + bytes.NewReader(buf), + ) if err != nil { return req, err } diff --git a/vendor/github.com/coredns/coredns/plugin/pkg/edns/edns.go 
b/vendor/github.com/coredns/coredns/plugin/pkg/edns/edns.go index 31f57ea9..cd863991 100644 --- a/vendor/github.com/coredns/coredns/plugin/pkg/edns/edns.go +++ b/vendor/github.com/coredns/coredns/plugin/pkg/edns/edns.go @@ -36,8 +36,7 @@ func SupportedOption(option uint16) bool { // Version checks the EDNS version in the request. If error // is nil everything is OK and we can invoke the plugin. If non-nil, the -// returned Msg is valid to be returned to the client (and should). For some -// reason this response should not contain a question RR in the question section. +// returned Msg is valid to be returned to the client (and should). func Version(req *dns.Msg) (*dns.Msg, error) { opt := req.IsEdns0() if opt == nil { @@ -48,8 +47,6 @@ func Version(req *dns.Msg) (*dns.Msg, error) { } m := new(dns.Msg) m.SetReply(req) - // zero out question section, wtf. - m.Question = nil o := new(dns.OPT) o.Hdr.Name = "." diff --git a/vendor/github.com/coredns/coredns/plugin/pkg/log/log.go b/vendor/github.com/coredns/coredns/plugin/pkg/log/log.go index 0589a345..ad8d7ac3 100644 --- a/vendor/github.com/coredns/coredns/plugin/pkg/log/log.go +++ b/vendor/github.com/coredns/coredns/plugin/pkg/log/log.go @@ -13,7 +13,7 @@ import ( "io" golog "log" "os" - "sync" + "sync/atomic" ) // D controls whether we should output debug logs. If true, we do, once set @@ -21,30 +21,22 @@ import ( var D = &d{} type d struct { - on bool - sync.RWMutex + on atomic.Bool } // Set enables debug logging. func (d *d) Set() { - d.Lock() - d.on = true - d.Unlock() + d.on.Store(true) } // Clear disables debug logging. func (d *d) Clear() { - d.Lock() - d.on = false - d.Unlock() + d.on.Store(false) } // Value returns if debug logging is enabled. func (d *d) Value() bool { - d.RLock() - b := d.on - d.RUnlock() - return b + return d.on.Load() } // logf calls log.Printf prefixed with level. 
diff --git a/vendor/github.com/coredns/coredns/plugin/pkg/nonwriter/nonwriter.go b/vendor/github.com/coredns/coredns/plugin/pkg/nonwriter/nonwriter.go deleted file mode 100644 index 411e98a9..00000000 --- a/vendor/github.com/coredns/coredns/plugin/pkg/nonwriter/nonwriter.go +++ /dev/null @@ -1,21 +0,0 @@ -// Package nonwriter implements a dns.ResponseWriter that never writes, but captures the dns.Msg being written. -package nonwriter - -import ( - "github.com/miekg/dns" -) - -// Writer is a type of ResponseWriter that captures the message, but never writes to the client. -type Writer struct { - dns.ResponseWriter - Msg *dns.Msg -} - -// New makes and returns a new NonWriter. -func New(w dns.ResponseWriter) *Writer { return &Writer{ResponseWriter: w} } - -// WriteMsg records the message, but doesn't write it itself. -func (w *Writer) WriteMsg(res *dns.Msg) error { - w.Msg = res - return nil -} diff --git a/vendor/github.com/coredns/coredns/plugin/pkg/parse/host.go b/vendor/github.com/coredns/coredns/plugin/pkg/parse/host.go index 9206a033..78f7cd93 100644 --- a/vendor/github.com/coredns/coredns/plugin/pkg/parse/host.go +++ b/vendor/github.com/coredns/coredns/plugin/pkg/parse/host.go @@ -33,6 +33,14 @@ func HostPortOrFile(s ...string) ([]string, error) { var servers []string for _, h := range s { trans, host := Transport(h) + if len(host) == 0 { + return servers, fmt.Errorf("invalid address: %q", h) + } + + if trans == transport.UNIX { + servers = append(servers, trans+"://"+host) + continue + } addr, _, err := net.SplitHostPort(host) @@ -53,6 +61,8 @@ func HostPortOrFile(s ...string) ([]string, error) { ss = net.JoinHostPort(host, transport.Port) case transport.TLS: ss = transport.TLS + "://" + net.JoinHostPort(host, transport.TLSPort) + case transport.QUIC: + ss = transport.QUIC + "://" + net.JoinHostPort(host, transport.QUICPort) case transport.GRPC: ss = transport.GRPC + "://" + net.JoinHostPort(host, transport.GRPCPort) case transport.HTTPS: @@ -89,7 +99,7 @@ 
func tryFile(s string) ([]string, error) { servers := []string{} for _, s := range c.Servers { - servers = append(servers, net.JoinHostPort(s, c.Port)) + servers = append(servers, net.JoinHostPort(stripZone(s), c.Port)) } return servers, nil } diff --git a/vendor/github.com/coredns/coredns/plugin/pkg/parse/transport.go b/vendor/github.com/coredns/coredns/plugin/pkg/parse/transport.go index d632120d..f0cf1c24 100644 --- a/vendor/github.com/coredns/coredns/plugin/pkg/parse/transport.go +++ b/vendor/github.com/coredns/coredns/plugin/pkg/parse/transport.go @@ -19,6 +19,10 @@ func Transport(s string) (trans string, addr string) { s = s[len(transport.DNS+"://"):] return transport.DNS, s + case strings.HasPrefix(s, transport.QUIC+"://"): + s = s[len(transport.QUIC+"://"):] + return transport.QUIC, s + case strings.HasPrefix(s, transport.GRPC+"://"): s = s[len(transport.GRPC+"://"):] return transport.GRPC, s @@ -27,6 +31,9 @@ func Transport(s string) (trans string, addr string) { s = s[len(transport.HTTPS+"://"):] return transport.HTTPS, s + case strings.HasPrefix(s, transport.UNIX+"://"): + s = s[len(transport.UNIX+"://"):] + return transport.UNIX, s } return transport.DNS, s diff --git a/vendor/github.com/coredns/coredns/plugin/pkg/transport/transport.go b/vendor/github.com/coredns/coredns/plugin/pkg/transport/transport.go index 85b3bee5..cdb2c79b 100644 --- a/vendor/github.com/coredns/coredns/plugin/pkg/transport/transport.go +++ b/vendor/github.com/coredns/coredns/plugin/pkg/transport/transport.go @@ -4,8 +4,10 @@ package transport const ( DNS = "dns" TLS = "tls" + QUIC = "quic" GRPC = "grpc" HTTPS = "https" + UNIX = "unix" ) // Port numbers for the various transports. @@ -14,6 +16,8 @@ const ( Port = "53" // TLSPort is the default port for DNS-over-TLS. TLSPort = "853" + // QUICPort is the default port for DNS-over-QUIC. + QUICPort = "853" // GRPCPort is the default port for DNS-over-gRPC. GRPCPort = "443" // HTTPSPort is the default port for DNS-over-HTTPS. 
diff --git a/vendor/github.com/coredns/coredns/plugin/plugin.go b/vendor/github.com/coredns/coredns/plugin/plugin.go index 51f5ba79..ca5fe010 100644 --- a/vendor/github.com/coredns/coredns/plugin/plugin.go +++ b/vendor/github.com/coredns/coredns/plugin/plugin.go @@ -108,5 +108,9 @@ var TimeBuckets = prometheus.ExponentialBuckets(0.00025, 2, 16) // from 0.25ms t // SlimTimeBuckets is low cardinality set of duration buckets. var SlimTimeBuckets = prometheus.ExponentialBuckets(0.00025, 10, 5) // from 0.25ms to 2.5 seconds +// NativeHistogramBucketFactor controls the resolution of Prometheus native histogram buckets. +// See: https://pkg.go.dev/github.com/prometheus/client_golang@v1.19.0/prometheus#section-readme +var NativeHistogramBucketFactor = 1.05 + // ErrOnce is returned when a plugin doesn't support multiple setups per server. var ErrOnce = errors.New("this plugin can only be used once per Server Block") diff --git a/vendor/github.com/coredns/coredns/plugin/test/file.go b/vendor/github.com/coredns/coredns/plugin/test/file.go index 969406e9..667b6a3f 100644 --- a/vendor/github.com/coredns/coredns/plugin/test/file.go +++ b/vendor/github.com/coredns/coredns/plugin/test/file.go @@ -3,6 +3,7 @@ package test import ( "os" "path/filepath" + "testing" ) // TempFile will create a temporary file on disk and returns the name and a cleanup function to remove it later. 
@@ -18,12 +19,9 @@ func TempFile(dir, content string) (string, func(), error) { return f.Name(), rmFunc, nil } -// WritePEMFiles creates a tmp dir with ca.pem, cert.pem, and key.pem and the func to remove it -func WritePEMFiles(dir string) (string, func(), error) { - tempDir, err := os.MkdirTemp(dir, "go-test-pemfiles") - if err != nil { - return "", nil, err - } +// WritePEMFiles creates a tmp dir with ca.pem, cert.pem, and key.pem +func WritePEMFiles(t *testing.T) (string, error) { + tempDir := t.TempDir() data := `-----BEGIN CERTIFICATE----- MIIC9zCCAd+gAwIBAgIJALGtqdMzpDemMA0GCSqGSIb3DQEBCwUAMBIxEDAOBgNV @@ -45,7 +43,7 @@ I1rs/VUGKzcJGVIWbHrgjP68CTStGAvKgbsTqw7aLXTSqtPw88N9XVSyRg== -----END CERTIFICATE-----` path := filepath.Join(tempDir, "ca.pem") if err := os.WriteFile(path, []byte(data), 0644); err != nil { - return "", nil, err + return "", err } data = `-----BEGIN CERTIFICATE----- MIICozCCAYsCCQCRlf5BrvPuqjANBgkqhkiG9w0BAQsFADASMRAwDgYDVQQDDAdr @@ -65,8 +63,8 @@ zhDEPP4FhY+Sz+y1yWirphl7A1aZwhXVPcfWIGqpQ3jzNwUeocbH27kuLh+U4hQo qeg10RdFnw== -----END CERTIFICATE-----` path = filepath.Join(tempDir, "cert.pem") - if err = os.WriteFile(path, []byte(data), 0644); err != nil { - return "", nil, err + if err := os.WriteFile(path, []byte(data), 0644); err != nil { + return "", err } data = `-----BEGIN RSA PRIVATE KEY----- @@ -97,10 +95,9 @@ E/WObVJXDnBdViu0L9abE9iaTToBVri4cmlDlZagLuKVR+TFTCN/DSlVZTDkqkLI 8chzqtkH6b2b2R73hyRysWjsomys34ma3mEEPTX/aXeAF2MSZ/EWT9yL -----END RSA PRIVATE KEY-----` path = filepath.Join(tempDir, "key.pem") - if err = os.WriteFile(path, []byte(data), 0644); err != nil { - return "", nil, err + if err := os.WriteFile(path, []byte(data), 0644); err != nil { + return "", err } - rmFunc := func() { os.RemoveAll(tempDir) } - return tempDir, rmFunc, nil + return tempDir, nil } diff --git a/vendor/github.com/coredns/coredns/plugin/test/helpers.go b/vendor/github.com/coredns/coredns/plugin/test/helpers.go index 8145b605..f99790a2 100644 --- 
a/vendor/github.com/coredns/coredns/plugin/test/helpers.go +++ b/vendor/github.com/coredns/coredns/plugin/test/helpers.go @@ -29,15 +29,19 @@ func (p RRSet) Less(i, j int) bool { return p[i].String() < p[j].String() } // Case represents a test case that encapsulates various data from a query and response. // Note that is the TTL of a record is 303 we don't compare it with the TTL. type Case struct { - Qname string - Qtype uint16 - Rcode int - Do bool - AuthenticatedData bool - Answer []dns.RR - Ns []dns.RR - Extra []dns.RR - Error error + Qname string + Qtype uint16 + Rcode int + Do bool + CheckingDisabled bool + RecursionAvailable bool + AuthenticatedData bool + Authoritative bool + Truncated bool + Answer []dns.RR + Ns []dns.RR + Extra []dns.RR + Error error } // Msg returns a *dns.Msg embedded in c. diff --git a/vendor/github.com/coredns/coredns/plugin/test/scrape.go b/vendor/github.com/coredns/coredns/plugin/test/scrape.go index 7847e39d..7ac22d53 100644 --- a/vendor/github.com/coredns/coredns/plugin/test/scrape.go +++ b/vendor/github.com/coredns/coredns/plugin/test/scrape.go @@ -19,7 +19,6 @@ // // result := Scrape("http://localhost:9153/metrics") // v := MetricValue("coredns_cache_capacity", result) -// package test import ( @@ -217,7 +216,7 @@ func makeBuckets(m *dto.Metric) map[string]string { func fetchMetricFamilies(url string, ch chan<- *dto.MetricFamily) { defer close(ch) - req, err := http.NewRequest("GET", url, nil) + req, err := http.NewRequest(http.MethodGet, url, nil) if err != nil { return } diff --git a/vendor/github.com/gobwas/ws/.travis.yml b/vendor/github.com/gobwas/ws/.travis.yml deleted file mode 100644 index cf74f1be..00000000 --- a/vendor/github.com/gobwas/ws/.travis.yml +++ /dev/null @@ -1,25 +0,0 @@ -sudo: required - -language: go - -services: - - docker - -os: - - linux - - windows - -go: - - 1.8.x - - 1.9.x - - 1.10.x - - 1.11.x - - 1.x - -install: - - go get github.com/gobwas/pool - - go get github.com/gobwas/httphead - -script: - - 
if [ "$TRAVIS_OS_NAME" = "windows" ]; then go test ./...; fi - - if [ "$TRAVIS_OS_NAME" = "linux" ]; then make test autobahn; fi diff --git a/vendor/github.com/gobwas/ws/LICENSE b/vendor/github.com/gobwas/ws/LICENSE index d2611fdd..ca6dfd9e 100644 --- a/vendor/github.com/gobwas/ws/LICENSE +++ b/vendor/github.com/gobwas/ws/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2017-2018 Sergey Kamardin +Copyright (c) 2017-2021 Sergey Kamardin Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/gobwas/ws/Makefile b/vendor/github.com/gobwas/ws/Makefile index 075e83c7..8f727393 100644 --- a/vendor/github.com/gobwas/ws/Makefile +++ b/vendor/github.com/gobwas/ws/Makefile @@ -13,15 +13,22 @@ bin/gocovmerge: .PHONY: autobahn autobahn: clean bin/reporter - ./autobahn/script/test.sh --build + ./autobahn/script/test.sh --build --follow-logs bin/reporter $(PWD)/autobahn/report/index.json +.PHONY: autobahn/report +autobahn/report: bin/reporter + ./bin/reporter -http localhost:5555 ./autobahn/report/index.json + test: go test -coverprofile=ws.coverage . go test -coverprofile=wsutil.coverage ./wsutil + go test -coverprofile=wsfalte.coverage ./wsflate + # No statemenets to cover in ./tests (there are only tests). 
+ go test ./tests cover: bin/gocovmerge test autobahn - bin/gocovmerge ws.coverage wsutil.coverage autobahn/report/server.coverage > total.coverage + bin/gocovmerge ws.coverage wsutil.coverage wsflate.coverage autobahn/report/server.coverage > total.coverage benchcmp: BENCH_BRANCH=$(shell git rev-parse --abbrev-ref HEAD) benchcmp: BENCH_OLD:=$(shell mktemp -t old.XXXX) diff --git a/vendor/github.com/gobwas/ws/README.md b/vendor/github.com/gobwas/ws/README.md index 74acd78b..0bd0f6b0 100644 --- a/vendor/github.com/gobwas/ws/README.md +++ b/vendor/github.com/gobwas/ws/README.md @@ -1,7 +1,7 @@ # ws [![GoDoc][godoc-image]][godoc-url] -[![Travis][travis-image]][travis-url] +[![CI][ci-badge]][ci-url] > [RFC6455][rfc-url] WebSocket implementation in Go. @@ -351,10 +351,191 @@ func main() { } ``` +# Compression + +There is a `ws/wsflate` package to support [Permessage-Deflate Compression +Extension][rfc-pmce]. + +It provides minimalistic I/O wrappers to be used in conjunction with any +deflate implementation (for example, the standard library's +[compress/flate][compress/flate]). + +It is also compatible with `wsutil`'s reader and writer by providing +`wsflate.MessageState` type, which implements `wsutil.SendExtension` and +`wsutil.RecvExtension` interfaces. + +```go +package main + +import ( + "bytes" + "log" + "net" + + "github.com/gobwas/ws" + "github.com/gobwas/ws/wsflate" +) + +func main() { + ln, err := net.Listen("tcp", "localhost:8080") + if err != nil { + // handle error + } + e := wsflate.Extension{ + // We are using default parameters here since we use + // wsflate.{Compress,Decompress}Frame helpers below in the code. + // This assumes that we use standard compress/flate package as flate + // implementation. + Parameters: wsflate.DefaultParameters, + } + u := ws.Upgrader{ + Negotiate: e.Negotiate, + } + for { + conn, err := ln.Accept() + if err != nil { + log.Fatal(err) + } + + // Reset extension after previous upgrades. 
+ e.Reset() + + _, err = u.Upgrade(conn) + if err != nil { + log.Printf("upgrade error: %s", err) + continue + } + if _, ok := e.Accepted(); !ok { + log.Printf("didn't negotiate compression for %s", conn.RemoteAddr()) + conn.Close() + continue + } + + go func() { + defer conn.Close() + for { + frame, err := ws.ReadFrame(conn) + if err != nil { + // Handle error. + return + } + + frame = ws.UnmaskFrameInPlace(frame) + + if wsflate.IsCompressed(frame.Header) { + // Note that even after successful negotiation of + // compression extension, both sides are able to send + // non-compressed messages. + frame, err = wsflate.DecompressFrame(frame) + if err != nil { + // Handle error. + return + } + } + + // Do something with frame... + + ack := ws.NewTextFrame([]byte("this is an acknowledgement")) + + // Compress response unconditionally. + ack, err = wsflate.CompressFrame(ack) + if err != nil { + // Handle error. + return + } + if err = ws.WriteFrame(conn, ack); err != nil { + // Handle error. + return + } + } + }() + } +} +``` + +You can use compression with `wsutil` package this way: + +```go + // Upgrade somehow and negotiate compression to get the conn... + + // Initialize flate reader. We are using nil as a source io.Reader because + // we will Reset() it in the message i/o loop below. + fr := wsflate.NewReader(nil, func(r io.Reader) wsflate.Decompressor { + return flate.NewReader(r) + }) + // Initialize flate writer. We are using nil as a destination io.Writer + // because we will Reset() it in the message i/o loop below. + fw := wsflate.NewWriter(nil, func(w io.Writer) wsflate.Compressor { + f, _ := flate.NewWriter(w, 9) + return f + }) + + // Declare compression message state variable. + // + // It has two goals: + // - Allow users to check whether received message is compressed or not. 
+ // - Help wsutil.Reader and wsutil.Writer to set/unset appropriate + // WebSocket header bits while writing next frame to the wire (it + // implements wsutil.RecvExtension and wsutil.SendExtension). + var msg wsflate.MessageState + + // Initialize WebSocket reader as previously. + // Please note the use of Reader.Extensions field as well as + // of ws.StateExtended flag. + rd := &wsutil.Reader{ + Source: conn, + State: ws.StateServerSide | ws.StateExtended, + Extensions: []wsutil.RecvExtension{ + &msg, + }, + } + + // Initialize WebSocket writer with ws.StateExtended flag as well. + wr := wsutil.NewWriter(conn, ws.StateServerSide|ws.StateExtended, 0) + // Use the message state as wsutil.SendExtension. + wr.SetExtensions(&msg) + + for { + h, err := rd.NextFrame() + if err != nil { + // handle error. + } + if h.OpCode.IsControl() { + // handle control frame. + } + if !msg.IsCompressed() { + // handle uncompressed frame (skipped for the sake of example + // simplicity). + } + + // Reset the writer to echo same op code. + wr.Reset(h.OpCode) + + // Reset both flate reader and writer to start the new round of i/o. + fr.Reset(rd) + fw.Reset(wr) + + // Copy whole message from reader to writer decompressing it and + // compressing again. + if _, err := io.Copy(fw, fr); err != nil { + // handle error. + } + // Flush any remaining buffers from flate writer to WebSocket writer. + if err := fw.Close(); err != nil { + // handle error. + } + // Flush the whole WebSocket message to the wire. + if err := wr.Flush(); err != nil { + // handle error. 
+ } + } +``` [rfc-url]: https://tools.ietf.org/html/rfc6455 +[rfc-pmce]: https://tools.ietf.org/html/rfc7692#section-7 [godoc-image]: https://godoc.org/github.com/gobwas/ws?status.svg [godoc-url]: https://godoc.org/github.com/gobwas/ws -[travis-image]: https://travis-ci.org/gobwas/ws.svg?branch=master -[travis-url]: https://travis-ci.org/gobwas/ws +[compress/flate]: https://golang.org/pkg/compress/flate/ +[ci-badge]: https://github.com/gobwas/ws/workflows/CI/badge.svg +[ci-url]: https://github.com/gobwas/ws/actions?query=workflow%3ACI diff --git a/vendor/github.com/gobwas/ws/cipher.go b/vendor/github.com/gobwas/ws/cipher.go index 026f4fd0..3c35e6b8 100644 --- a/vendor/github.com/gobwas/ws/cipher.go +++ b/vendor/github.com/gobwas/ws/cipher.go @@ -36,7 +36,7 @@ func Cipher(payload []byte, mask [4]byte, offset int) { } // NOTE: we use here binary.LittleEndian regardless of what is real - // endianess on machine is. To do so, we have to use binary.LittleEndian in + // endianness on machine is. To do so, we have to use binary.LittleEndian in // the masking loop below as well. var ( m = binary.LittleEndian.Uint32(mask[:]) diff --git a/vendor/github.com/gobwas/ws/dialer.go b/vendor/github.com/gobwas/ws/dialer.go index 4357be21..64d46811 100644 --- a/vendor/github.com/gobwas/ws/dialer.go +++ b/vendor/github.com/gobwas/ws/dialer.go @@ -8,6 +8,7 @@ import ( "fmt" "io" "net" + "net/http" "net/url" "strconv" "strings" @@ -145,7 +146,7 @@ type Dialer struct { func (d Dialer) Dial(ctx context.Context, urlstr string) (conn net.Conn, br *bufio.Reader, hs Handshake, err error) { u, err := url.ParseRequestURI(urlstr) if err != nil { - return + return nil, nil, hs, err } // Prepare context to dial with. 
Initially it is the same as original, but @@ -163,7 +164,7 @@ func (d Dialer) Dial(ctx context.Context, urlstr string) (conn net.Conn, br *buf } } if conn, err = d.dial(dialctx, u); err != nil { - return + return conn, nil, hs, err } defer func() { if err != nil { @@ -189,7 +190,7 @@ func (d Dialer) Dial(ctx context.Context, urlstr string) (conn net.Conn, br *buf br, hs, err = d.Upgrade(conn, u) - return + return conn, br, hs, err } var ( @@ -204,7 +205,7 @@ func tlsDefaultConfig() *tls.Config { return &tlsEmptyConfig } -func hostport(host string, defaultPort string) (hostname, addr string) { +func hostport(host, defaultPort string) (hostname, addr string) { var ( colon = strings.LastIndexByte(host, ':') bracket = strings.IndexByte(host, ']') @@ -228,7 +229,7 @@ func (d Dialer) dial(ctx context.Context, u *url.URL) (conn net.Conn, err error) hostname, addr := hostport(u.Host, ":443") conn, err = dial(ctx, "tcp", addr) if err != nil { - return + return nil, err } tlsClient := d.TLSClient if tlsClient == nil { @@ -241,7 +242,7 @@ func (d Dialer) dial(ctx context.Context, u *url.URL) (conn net.Conn, err error) if wrap := d.WrapConn; wrap != nil { conn = wrap(conn) } - return + return conn, err } func (d Dialer) tlsClient(conn net.Conn, hostname string) net.Conn { @@ -310,29 +311,29 @@ func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Ha initNonce(nonce) httpWriteUpgradeRequest(bw, u, nonce, d.Protocols, d.Extensions, d.Header) - if err = bw.Flush(); err != nil { - return + if err := bw.Flush(); err != nil { + return br, hs, err } // Read HTTP status line like "HTTP/1.1 101 Switching Protocols". sl, err := readLine(br) if err != nil { - return + return br, hs, err } // Begin validation of the response. // See https://tools.ietf.org/html/rfc6455#section-4.2.2 // Parse request line data like HTTP version, uri and method. 
resp, err := httpParseResponseLine(sl) if err != nil { - return + return br, hs, err } // Even if RFC says "1.1 or higher" without mentioning the part of the // version, we apply it only to minor part. if resp.major != 1 || resp.minor < 1 { err = ErrHandshakeBadProtocol - return + return br, hs, err } - if resp.status != 101 { + if resp.status != http.StatusSwitchingProtocols { err = StatusError(resp.status) if onStatusError := d.OnStatusError; onStatusError != nil { // Invoke callback with multireader of status-line bytes br. @@ -344,7 +345,7 @@ func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Ha ), ) } - return + return br, hs, err } // If response status is 101 then we expect all technical headers to be // valid. If not, then we stop processing response without giving user @@ -355,7 +356,7 @@ func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Ha line, e := readLine(br) if e != nil { err = e - return + return br, hs, err } if len(line) == 0 { // Blank line, no more lines to read. @@ -365,7 +366,7 @@ func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Ha k, v, ok := httpParseHeaderLine(line) if !ok { err = ErrMalformedResponse - return + return br, hs, err } switch btsToString(k) { @@ -373,7 +374,7 @@ func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Ha headerSeen |= headerSeenUpgrade if !bytes.Equal(v, specHeaderValueUpgrade) && !bytes.EqualFold(v, specHeaderValueUpgrade) { err = ErrHandshakeBadUpgrade - return + return br, hs, err } case headerConnectionCanonical: @@ -384,14 +385,14 @@ func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Ha // multiple token. But in response it must contains exactly one. 
if !bytes.Equal(v, specHeaderValueConnection) && !bytes.EqualFold(v, specHeaderValueConnection) { err = ErrHandshakeBadConnection - return + return br, hs, err } case headerSecAcceptCanonical: headerSeen |= headerSeenSecAccept if !checkAcceptFromNonce(v, nonce) { err = ErrHandshakeBadSecAccept - return + return br, hs, err } case headerSecProtocolCanonical: @@ -409,20 +410,20 @@ func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Ha // Server echoed subprotocol that is not present in client // requested protocols. err = ErrHandshakeBadSubProtocol - return + return br, hs, err } case headerSecExtensionsCanonical: hs.Extensions, err = matchSelectedExtensions(v, d.Extensions, hs.Extensions) if err != nil { - return + return br, hs, err } default: if onHeader := d.OnHeader; onHeader != nil { if e := onHeader(k, v); e != nil { err = e - return + return br, hs, err } } } @@ -439,7 +440,7 @@ func (d Dialer) Upgrade(conn io.ReadWriter, u *url.URL) (br *bufio.Reader, hs Ha panic("unknown headers state") } } - return + return br, hs, err } // PutReader returns bufio.Reader instance to the inner reuse pool. @@ -474,10 +475,19 @@ func matchSelectedExtensions(selected []byte, wanted, received []httphead.Option index = -1 match := func() (ok bool) { for _, want := range wanted { - if option.Equal(want) { + // A server accepts one or more extensions by including a + // |Sec-WebSocket-Extensions| header field containing one or more + // extensions that were requested by the client. + // + // The interpretation of any extension parameters, and what + // constitutes a valid response by a server to a requested set of + // parameters by a client, will be defined by each such extension. + if bytes.Equal(option.Name, want.Name) { // Check parsed extension to be present in client // requested extensions. We move matched extension - // from client list to avoid allocation. 
+ // from client list to avoid allocation of httphead.Option.Name, + // httphead.Option.Parameters have to be copied from the header + want.Parameters, _ = option.Parameters.Copy(make([]byte, option.Parameters.Size())) received = append(received, want) return true } diff --git a/vendor/github.com/gobwas/ws/dialer_tls_go18.go b/vendor/github.com/gobwas/ws/dialer_tls_go18.go index a6704d51..5589ee5e 100644 --- a/vendor/github.com/gobwas/ws/dialer_tls_go18.go +++ b/vendor/github.com/gobwas/ws/dialer_tls_go18.go @@ -1,3 +1,4 @@ +//go:build go1.8 // +build go1.8 package ws diff --git a/vendor/github.com/gobwas/ws/doc.go b/vendor/github.com/gobwas/ws/doc.go index c9d57915..0118ce2c 100644 --- a/vendor/github.com/gobwas/ws/doc.go +++ b/vendor/github.com/gobwas/ws/doc.go @@ -11,70 +11,70 @@ Upgrade to WebSocket (or WebSocket handshake) can be done in two ways. The first way is to use `net/http` server: - http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { - conn, _, _, err := ws.UpgradeHTTP(r, w) - }) + http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) { + conn, _, _, err := ws.UpgradeHTTP(r, w) + }) The second and much more efficient way is so-called "zero-copy upgrade". It avoids redundant allocations and copying of not used headers or other request data. User decides by himself which data should be copied. - ln, err := net.Listen("tcp", ":8080") - if err != nil { - // handle error - } + ln, err := net.Listen("tcp", ":8080") + if err != nil { + // handle error + } - conn, err := ln.Accept() - if err != nil { - // handle error - } + conn, err := ln.Accept() + if err != nil { + // handle error + } - handshake, err := ws.Upgrade(conn) - if err != nil { - // handle error - } + handshake, err := ws.Upgrade(conn) + if err != nil { + // handle error + } For customization details see `ws.Upgrader` documentation. After WebSocket handshake you can work with connection in multiple ways. 
That is, `ws` does not force the only one way of how to work with WebSocket: - header, err := ws.ReadHeader(conn) - if err != nil { - // handle err - } + header, err := ws.ReadHeader(conn) + if err != nil { + // handle err + } - buf := make([]byte, header.Length) - _, err := io.ReadFull(conn, buf) - if err != nil { - // handle err - } + buf := make([]byte, header.Length) + _, err := io.ReadFull(conn, buf) + if err != nil { + // handle err + } - resp := ws.NewBinaryFrame([]byte("hello, world!")) - if err := ws.WriteFrame(conn, frame); err != nil { - // handle err - } + resp := ws.NewBinaryFrame([]byte("hello, world!")) + if err := ws.WriteFrame(conn, frame); err != nil { + // handle err + } As you can see, it stream friendly: - const N = 42 + const N = 42 - ws.WriteHeader(ws.Header{ - Fin: true, - Length: N, - OpCode: ws.OpBinary, - }) + ws.WriteHeader(ws.Header{ + Fin: true, + Length: N, + OpCode: ws.OpBinary, + }) - io.CopyN(conn, rand.Reader, N) + io.CopyN(conn, rand.Reader, N) Or: - header, err := ws.ReadHeader(conn) - if err != nil { - // handle err - } + header, err := ws.ReadHeader(conn) + if err != nil { + // handle err + } - io.CopyN(ioutil.Discard, conn, header.Length) + io.CopyN(ioutil.Discard, conn, header.Length) For more info see the documentation. */ diff --git a/vendor/github.com/gobwas/ws/errors.go b/vendor/github.com/gobwas/ws/errors.go index 48fce3b7..f5668b2b 100644 --- a/vendor/github.com/gobwas/ws/errors.go +++ b/vendor/github.com/gobwas/ws/errors.go @@ -2,12 +2,12 @@ package ws // RejectOption represents an option used to control the way connection is // rejected. -type RejectOption func(*rejectConnectionError) +type RejectOption func(*ConnectionRejectedError) // RejectionReason returns an option that makes connection to be rejected with // given reason. 
func RejectionReason(reason string) RejectOption { - return func(err *rejectConnectionError) { + return func(err *ConnectionRejectedError) { err.reason = reason } } @@ -15,7 +15,7 @@ func RejectionReason(reason string) RejectOption { // RejectionStatus returns an option that makes connection to be rejected with // given HTTP status code. func RejectionStatus(code int) RejectOption { - return func(err *rejectConnectionError) { + return func(err *ConnectionRejectedError) { err.code = code } } @@ -23,32 +23,37 @@ func RejectionStatus(code int) RejectOption { // RejectionHeader returns an option that makes connection to be rejected with // given HTTP headers. func RejectionHeader(h HandshakeHeader) RejectOption { - return func(err *rejectConnectionError) { + return func(err *ConnectionRejectedError) { err.header = h } } -// RejectConnectionError constructs an error that could be used to control the way -// handshake is rejected by Upgrader. +// RejectConnectionError constructs an error that could be used to control the +// way handshake is rejected by Upgrader. func RejectConnectionError(options ...RejectOption) error { - err := new(rejectConnectionError) + err := new(ConnectionRejectedError) for _, opt := range options { opt(err) } return err } -// rejectConnectionError represents a rejection of upgrade error. +// ConnectionRejectedError represents a rejection of connection during +// WebSocket handshake error. // -// It can be returned by Upgrader's On* hooks to control the way WebSocket -// handshake is rejected. -type rejectConnectionError struct { +// It can be returned by Upgrader's On* hooks to indicate that WebSocket +// handshake should be rejected. +type ConnectionRejectedError struct { reason string code int header HandshakeHeader } // Error implements error interface. 
-func (r *rejectConnectionError) Error() string { +func (r *ConnectionRejectedError) Error() string { return r.reason } + +func (r *ConnectionRejectedError) StatusCode() int { + return r.code +} diff --git a/vendor/github.com/gobwas/ws/frame.go b/vendor/github.com/gobwas/ws/frame.go index f157ee3e..ae10144e 100644 --- a/vendor/github.com/gobwas/ws/frame.go +++ b/vendor/github.com/gobwas/ws/frame.go @@ -206,6 +206,28 @@ func (h Header) Rsv2() bool { return h.Rsv&bit6 != 0 } // Rsv3 reports whether the header has third rsv bit set. func (h Header) Rsv3() bool { return h.Rsv&bit7 != 0 } +// Rsv creates rsv byte representation from bits. +func Rsv(r1, r2, r3 bool) (rsv byte) { + if r1 { + rsv |= bit5 + } + if r2 { + rsv |= bit6 + } + if r3 { + rsv |= bit7 + } + return rsv +} + +// RsvBits returns rsv bits from bytes representation. +func RsvBits(rsv byte) (r1, r2, r3 bool) { + r1 = rsv&bit5 != 0 + r2 = rsv&bit6 != 0 + r3 = rsv&bit7 != 0 + return r1, r2, r3 +} + // Frame represents websocket frame. // See https://tools.ietf.org/html/rfc6455#section-5.2 type Frame struct { @@ -319,6 +341,29 @@ func MaskFrameInPlace(f Frame) Frame { return MaskFrameInPlaceWith(f, NewMask()) } +var zeroMask [4]byte + +// UnmaskFrame unmasks frame and returns frame with unmasked payload and Mask +// header's field cleared. +// Note that it copies f payload. +func UnmaskFrame(f Frame) Frame { + p := make([]byte, len(f.Payload)) + copy(p, f.Payload) + f.Payload = p + return UnmaskFrameInPlace(f) +} + +// UnmaskFrameInPlace unmasks frame and returns frame with unmasked payload and +// Mask header's field cleared. +// Note that it applies xor cipher to f.Payload without copying, that is, it +// modifies f.Payload inplace. +func UnmaskFrameInPlace(f Frame) Frame { + Cipher(f.Payload, f.Header.Mask, 0) + f.Header.Masked = false + f.Header.Mask = zeroMask + return f +} + // MaskFrameInPlaceWith masks frame with given mask and returns frame // with masked payload and Mask header's field set. 
// Note that it applies xor cipher to f.Payload without copying, that is, it @@ -333,7 +378,7 @@ func MaskFrameInPlaceWith(f Frame, m [4]byte) Frame { // NewMask creates new random mask. func NewMask() (ret [4]byte) { binary.BigEndian.PutUint32(ret[:], rand.Uint32()) - return + return ret } // CompileFrame returns byte representation of given frame. @@ -343,7 +388,7 @@ func CompileFrame(f Frame) (bts []byte, err error) { buf := bytes.NewBuffer(make([]byte, 0, 16)) err = WriteFrame(buf, f) bts = buf.Bytes() - return + return bts, err } // MustCompileFrame is like CompileFrame but panics if frame can not be @@ -356,20 +401,6 @@ func MustCompileFrame(f Frame) []byte { return bts } -// Rsv creates rsv byte representation. -func Rsv(r1, r2, r3 bool) (rsv byte) { - if r1 { - rsv |= bit5 - } - if r2 { - rsv |= bit6 - } - if r3 { - rsv |= bit7 - } - return rsv -} - func makeCloseFrame(code StatusCode) Frame { return NewCloseFrame(NewCloseFrameBody(code, "")) } diff --git a/vendor/github.com/gobwas/ws/http.go b/vendor/github.com/gobwas/ws/http.go index e18df441..129e77ec 100644 --- a/vendor/github.com/gobwas/ws/http.go +++ b/vendor/github.com/gobwas/ws/http.go @@ -5,7 +5,6 @@ import ( "bytes" "io" "net/http" - "net/textproto" "net/url" "strconv" @@ -38,7 +37,8 @@ var ( textTailErrUpgradeRequired = errorText(ErrHandshakeUpgradeRequired) ) -var ( +const ( + // Every new header must be added to TestHeaderNames test. 
headerHost = "Host" headerUpgrade = "Upgrade" headerConnection = "Connection" @@ -48,14 +48,14 @@ var ( headerSecKey = "Sec-WebSocket-Key" headerSecAccept = "Sec-WebSocket-Accept" - headerHostCanonical = textproto.CanonicalMIMEHeaderKey(headerHost) - headerUpgradeCanonical = textproto.CanonicalMIMEHeaderKey(headerUpgrade) - headerConnectionCanonical = textproto.CanonicalMIMEHeaderKey(headerConnection) - headerSecVersionCanonical = textproto.CanonicalMIMEHeaderKey(headerSecVersion) - headerSecProtocolCanonical = textproto.CanonicalMIMEHeaderKey(headerSecProtocol) - headerSecExtensionsCanonical = textproto.CanonicalMIMEHeaderKey(headerSecExtensions) - headerSecKeyCanonical = textproto.CanonicalMIMEHeaderKey(headerSecKey) - headerSecAcceptCanonical = textproto.CanonicalMIMEHeaderKey(headerSecAccept) + headerHostCanonical = headerHost + headerUpgradeCanonical = headerUpgrade + headerConnectionCanonical = headerConnection + headerSecVersionCanonical = "Sec-Websocket-Version" + headerSecProtocolCanonical = "Sec-Websocket-Protocol" + headerSecExtensionsCanonical = "Sec-Websocket-Extensions" + headerSecKeyCanonical = "Sec-Websocket-Key" + headerSecAcceptCanonical = "Sec-Websocket-Accept" ) var ( @@ -91,10 +91,8 @@ func httpParseRequestLine(line []byte) (req httpRequestLine, err error) { req.major, req.minor, ok = httpParseVersion(proto) if !ok { err = ErrMalformedRequest - return } - - return + return req, err } func httpParseResponseLine(line []byte) (resp httpResponseLine, err error) { @@ -128,25 +126,25 @@ func httpParseVersion(bts []byte) (major, minor int, ok bool) { case bytes.Equal(bts, httpVersion1_1): return 1, 1, true case len(bts) < 8: - return + return 0, 0, false case !bytes.Equal(bts[:5], httpVersionPrefix): - return + return 0, 0, false } bts = bts[5:] dot := bytes.IndexByte(bts, '.') if dot == -1 { - return + return 0, 0, false } var err error major, err = asciiToInt(bts[:dot]) if err != nil { - return + return major, 0, false } minor, err = 
asciiToInt(bts[dot+1:]) if err != nil { - return + return major, minor, false } return major, minor, true @@ -157,7 +155,7 @@ func httpParseVersion(bts []byte) (major, minor int, ok bool) { func httpParseHeaderLine(line []byte) (k, v []byte, ok bool) { colon := bytes.IndexByte(line, ':') if colon == -1 { - return + return nil, nil, false } k = btrim(line[:colon]) @@ -198,8 +196,9 @@ func strSelectProtocol(h string, check func(string) bool) (ret string, ok bool) } return true }) - return + return ret, ok } + func btsSelectProtocol(h []byte, check func([]byte) bool) (ret string, ok bool) { var selected []byte ok = httphead.ScanTokens(h, func(v []byte) bool { @@ -212,21 +211,57 @@ func btsSelectProtocol(h []byte, check func([]byte) bool) (ret string, ok bool) if ok && selected != nil { return string(selected), true } - return -} - -func strSelectExtensions(h string, selected []httphead.Option, check func(httphead.Option) bool) ([]httphead.Option, bool) { - return btsSelectExtensions(strToBytes(h), selected, check) + return ret, ok } func btsSelectExtensions(h []byte, selected []httphead.Option, check func(httphead.Option) bool) ([]httphead.Option, bool) { s := httphead.OptionSelector{ - Flags: httphead.SelectUnique | httphead.SelectCopy, + Flags: httphead.SelectCopy, Check: check, } return s.Select(h, selected) } +func negotiateMaybe(in httphead.Option, dest []httphead.Option, f func(httphead.Option) (httphead.Option, error)) ([]httphead.Option, error) { + if in.Size() == 0 { + return dest, nil + } + opt, err := f(in) + if err != nil { + return nil, err + } + if opt.Size() > 0 { + dest = append(dest, opt) + } + return dest, nil +} + +func negotiateExtensions( + h []byte, dest []httphead.Option, + f func(httphead.Option) (httphead.Option, error), +) (_ []httphead.Option, err error) { + index := -1 + var current httphead.Option + ok := httphead.ScanOptions(h, func(i int, name, attr, val []byte) httphead.Control { + if i != index { + dest, err = negotiateMaybe(current, 
dest, f) + if err != nil { + return httphead.ControlBreak + } + index = i + current = httphead.Option{Name: name} + } + if attr != nil { + current.Parameters.Set(attr, val) + } + return httphead.ControlContinue + }) + if !ok { + return nil, ErrMalformedRequest + } + return negotiateMaybe(current, dest, f) +} + func httpWriteHeader(bw *bufio.Writer, key, value string) { httpWriteHeaderKey(bw, key) bw.WriteString(value) diff --git a/vendor/github.com/gobwas/ws/nonce.go b/vendor/github.com/gobwas/ws/nonce.go index e694da7c..7b0edd97 100644 --- a/vendor/github.com/gobwas/ws/nonce.go +++ b/vendor/github.com/gobwas/ws/nonce.go @@ -65,8 +65,6 @@ func initAcceptFromNonce(accept, nonce []byte) { sum := sha1.Sum(p) base64.StdEncoding.Encode(accept, sum[:]) - - return } func writeAccept(bw *bufio.Writer, nonce []byte) (int, error) { diff --git a/vendor/github.com/gobwas/ws/read.go b/vendor/github.com/gobwas/ws/read.go index bc653e46..1771816a 100644 --- a/vendor/github.com/gobwas/ws/read.go +++ b/vendor/github.com/gobwas/ws/read.go @@ -24,7 +24,7 @@ func ReadHeader(r io.Reader) (h Header, err error) { // Prepare to hold first 2 bytes to choose size of next read. _, err = io.ReadFull(r, bts) if err != nil { - return + return h, err } h.Fin = bts[0]&bit0 != 0 @@ -51,11 +51,11 @@ func ReadHeader(r io.Reader) (h Header, err error) { default: err = ErrHeaderLengthUnexpected - return + return h, err } if extra == 0 { - return + return h, err } // Increase len of bts to extra bytes need to read. 
@@ -63,7 +63,7 @@ func ReadHeader(r io.Reader) (h Header, err error) { bts = bts[:extra] _, err = io.ReadFull(r, bts) if err != nil { - return + return h, err } switch { @@ -74,7 +74,7 @@ func ReadHeader(r io.Reader) (h Header, err error) { case length == 127: if bts[0]&0x80 != 0 { err = ErrHeaderLengthMSB - return + return h, err } h.Length = int64(binary.BigEndian.Uint64(bts[:8])) bts = bts[8:] @@ -84,7 +84,7 @@ func ReadHeader(r io.Reader) (h Header, err error) { copy(h.Mask[:], bts) } - return + return h, nil } // ReadFrame reads a frame from r. @@ -95,7 +95,7 @@ func ReadHeader(r io.Reader) (h Header, err error) { func ReadFrame(r io.Reader) (f Frame, err error) { f.Header, err = ReadHeader(r) if err != nil { - return + return f, err } if f.Header.Length > 0 { @@ -105,7 +105,7 @@ func ReadFrame(r io.Reader) (f Frame, err error) { _, err = io.ReadFull(r, f.Payload) } - return + return f, err } // MustReadFrame is like ReadFrame but panics if frame can not be read. @@ -128,20 +128,20 @@ func ParseCloseFrameData(payload []byte) (code StatusCode, reason string) { // In other words, we ignoring this rule [RFC6455:7.1.5]: // If this Close control frame contains no status code, _The WebSocket // Connection Close Code_ is considered to be 1005. - return + return code, reason } code = StatusCode(binary.BigEndian.Uint16(payload)) reason = string(payload[2:]) - return + return code, reason } // ParseCloseFrameDataUnsafe is like ParseCloseFrameData except the thing // that it does not copies payload bytes into reason, but prepares unsafe cast. 
func ParseCloseFrameDataUnsafe(payload []byte) (code StatusCode, reason string) { if len(payload) < 2 { - return + return code, reason } code = StatusCode(binary.BigEndian.Uint16(payload)) reason = btsToString(payload[2:]) - return + return code, reason } diff --git a/vendor/github.com/gobwas/ws/server.go b/vendor/github.com/gobwas/ws/server.go index 62ad9c7f..f6cc8af3 100644 --- a/vendor/github.com/gobwas/ws/server.go +++ b/vendor/github.com/gobwas/ws/server.go @@ -24,11 +24,11 @@ const ( var ( ErrHandshakeBadProtocol = RejectConnectionError( RejectionStatus(http.StatusHTTPVersionNotSupported), - RejectionReason(fmt.Sprintf("handshake error: bad HTTP protocol version")), + RejectionReason("handshake error: bad HTTP protocol version"), ) ErrHandshakeBadMethod = RejectConnectionError( RejectionStatus(http.StatusMethodNotAllowed), - RejectionReason(fmt.Sprintf("handshake error: bad HTTP request method")), + RejectionReason("handshake error: bad HTTP request method"), ) ErrHandshakeBadHost = RejectConnectionError( RejectionStatus(http.StatusBadRequest), @@ -129,7 +129,22 @@ type HTTPUpgrader struct { // Extension is the select function that is used to select extensions from // list requested by client. If this field is set, then the all matched // extensions are sent to a client as negotiated. + // + // Deprecated: use Negotiate instead. Extension func(httphead.Option) bool + + // Negotiate is the callback that is used to negotiate extensions from + // the client's offer. If this field is set, then the returned non-zero + // extensions are sent to the client as accepted extensions in the + // response. + // + // The argument is only valid until the Negotiate callback returns. + // + // If returned error is non-nil then connection is rejected and response is + // sent with appropriate HTTP error code and body set to error message. + // + // RejectConnectionError could be used to get more control on response. 
+ Negotiate func(httphead.Option) (httphead.Option, error) } // Upgrade upgrades http connection to the websocket connection. @@ -148,7 +163,7 @@ func (u HTTPUpgrader) Upgrade(r *http.Request, w http.ResponseWriter) (conn net. } if err != nil { httpError(w, err.Error(), http.StatusInternalServerError) - return + return conn, rw, hs, err } // See https://tools.ietf.org/html/rfc6455#section-4.1 @@ -200,11 +215,20 @@ func (u HTTPUpgrader) Upgrade(r *http.Request, w http.ResponseWriter) (conn net. } } } - if check := u.Extension; err == nil && check != nil { + if f := u.Negotiate; err == nil && f != nil { + for _, h := range r.Header[headerSecExtensionsCanonical] { + hs.Extensions, err = negotiateExtensions(strToBytes(h), hs.Extensions, f) + if err != nil { + break + } + } + } + // DEPRECATED path. + if check := u.Extension; err == nil && check != nil && u.Negotiate == nil { xs := r.Header[headerSecExtensionsCanonical] for i := 0; i < len(xs) && err == nil; i++ { var ok bool - hs.Extensions, ok = strSelectExtensions(xs[i], hs.Extensions, check) + hs.Extensions, ok = btsSelectExtensions(strToBytes(xs[i]), hs.Extensions, check) if !ok { err = ErrMalformedRequest } @@ -227,7 +251,7 @@ func (u HTTPUpgrader) Upgrade(r *http.Request, w http.ResponseWriter) (conn net. err = rw.Writer.Flush() } else { var code int - if rej, ok := err.(*rejectConnectionError); ok { + if rej, ok := err.(*ConnectionRejectedError); ok { code = rej.code header[1] = rej.header } @@ -236,9 +260,9 @@ func (u HTTPUpgrader) Upgrade(r *http.Request, w http.ResponseWriter) (conn net. } httpWriteResponseError(rw.Writer, err, code, header.WriteTo) // Do not store Flush() error to not override already existing one. - rw.Writer.Flush() + _ = rw.Writer.Flush() } - return + return conn, rw, hs, err } // Upgrader contains options for upgrading connection to websocket. @@ -271,6 +295,9 @@ type Upgrader struct { // from list requested by client. 
If this field is set, then the all matched // extensions are sent to a client as negotiated. // + // Note that Extension may be called multiple times and implementations + // must track uniqueness of accepted extensions manually. + // // The argument is only valid until the callback returns. // // According to the RFC6455 order of extensions passed by a client is @@ -283,13 +310,38 @@ type Upgrader struct { // fields listed by the client in its request represent a preference of the // header fields it wishes to use, with the first options listed being most // preferable." + // + // Deprecated: use Negotiate instead. Extension func(httphead.Option) bool - // ExtensionCustom allow user to parse Sec-WebSocket-Extensions header manually. + // ExtensionCustom allow user to parse Sec-WebSocket-Extensions header + // manually. + // + // If ExtensionCustom() decides to accept received extension, it must + // append appropriate option to the given slice of httphead.Option. + // It returns results of append() to the given slice and a flag that + // reports whether given header value is wellformed or not. + // + // Note that ExtensionCustom may be called multiple times and + // implementations must track uniqueness of accepted extensions manually. + // // Note that returned options should be valid until Upgrade returns. // If ExtensionCustom is set, it used instead of Extension function. ExtensionCustom func([]byte, []httphead.Option) ([]httphead.Option, bool) + // Negotiate is the callback that is used to negotiate extensions from + // the client's offer. If this field is set, then the returned non-zero + // extensions are sent to the client as accepted extensions in the + // response. + // + // The argument is only valid until the Negotiate callback returns. + // + // If returned error is non-nil then connection is rejected and response is + // sent with appropriate HTTP error code and body set to error message. 
+ // + // RejectConnectionError could be used to get more control on response. + Negotiate func(httphead.Option) (httphead.Option, error) + // Header is an optional HandshakeHeader instance that could be used to // write additional headers to the handshake response. // @@ -399,12 +451,12 @@ func (u Upgrader) Upgrade(conn io.ReadWriter) (hs Handshake, err error) { // Read HTTP request line like "GET /ws HTTP/1.1". rl, err := readLine(br) if err != nil { - return + return hs, err } // Parse request line data like HTTP version, uri and method. req, err := httpParseRequestLine(rl) if err != nil { - return + return hs, err } // Prepare stack-based handshake header list. @@ -497,7 +549,7 @@ func (u Upgrader) Upgrade(conn io.ReadWriter) (hs Handshake, err error) { if len(v) != nonceSize { err = ErrHandshakeBadSecKey } else { - copy(nonce[:], v) + copy(nonce, v) } case headerSecProtocolCanonical: @@ -514,7 +566,11 @@ func (u Upgrader) Upgrade(conn io.ReadWriter) (hs Handshake, err error) { } case headerSecExtensionsCanonical: - if custom, check := u.ExtensionCustom, u.Extension; custom != nil || check != nil { + if f := u.Negotiate; err == nil && f != nil { + hs.Extensions, err = negotiateExtensions(v, hs.Extensions, f) + } + // DEPRECATED path. + if custom, check := u.ExtensionCustom, u.Extension; u.Negotiate == nil && (custom != nil || check != nil) { var ok bool if custom != nil { hs.Extensions, ok = custom(v, hs.Extensions) @@ -574,7 +630,7 @@ func (u Upgrader) Upgrade(conn io.ReadWriter) (hs Handshake, err error) { } if err != nil { var code int - if rej, ok := err.(*rejectConnectionError); ok { + if rej, ok := err.(*ConnectionRejectedError); ok { code = rej.code header[1] = rej.header } @@ -583,14 +639,14 @@ func (u Upgrader) Upgrade(conn io.ReadWriter) (hs Handshake, err error) { } httpWriteResponseError(bw, err, code, header.WriteTo) // Do not store Flush() error to not override already existing one. 
- bw.Flush() - return + _ = bw.Flush() + return hs, err } httpWriteResponseUpgrade(bw, nonce, hs, header.WriteTo) err = bw.Flush() - return + return hs, err } type handshakeHeader [2]HandshakeHeader diff --git a/vendor/github.com/gobwas/ws/server_test.s b/vendor/github.com/gobwas/ws/server_test.s deleted file mode 100644 index e69de29b..00000000 diff --git a/vendor/github.com/gobwas/ws/util.go b/vendor/github.com/gobwas/ws/util.go index 67ad906e..1dd5aa60 100644 --- a/vendor/github.com/gobwas/ws/util.go +++ b/vendor/github.com/gobwas/ws/util.go @@ -4,8 +4,6 @@ import ( "bufio" "bytes" "fmt" - "reflect" - "unsafe" "github.com/gobwas/httphead" ) @@ -41,19 +39,6 @@ func SelectEqual(v string) func(string) bool { } } -func strToBytes(str string) (bts []byte) { - s := (*reflect.StringHeader)(unsafe.Pointer(&str)) - b := (*reflect.SliceHeader)(unsafe.Pointer(&bts)) - b.Data = s.Data - b.Len = s.Len - b.Cap = s.Len - return -} - -func btsToString(bts []byte) (str string) { - return *(*string)(unsafe.Pointer(&bts)) -} - // asciiToInt converts bytes to int. func asciiToInt(bts []byte) (ret int, err error) { // ASCII numbers all start with the high-order bits 0011. @@ -73,7 +58,7 @@ func asciiToInt(bts []byte) (ret int, err error) { } // pow for integers implementation. -// See Donald Knuth, The Art of Computer Programming, Volume 2, Section 4.6.3 +// See Donald Knuth, The Art of Computer Programming, Volume 2, Section 4.6.3. 
func pow(a, b int) int { p := 1 for b > 0 { @@ -116,7 +101,7 @@ func btsHasToken(header, token []byte) (has bool) { has = bytes.EqualFold(v, token) return !has }) - return + return has } const ( diff --git a/vendor/github.com/gobwas/ws/util_purego.go b/vendor/github.com/gobwas/ws/util_purego.go new file mode 100644 index 00000000..449b3fdf --- /dev/null +++ b/vendor/github.com/gobwas/ws/util_purego.go @@ -0,0 +1,12 @@ +//go:build purego +// +build purego + +package ws + +func strToBytes(str string) (bts []byte) { + return []byte(str) +} + +func btsToString(bts []byte) (str string) { + return string(bts) +} diff --git a/vendor/github.com/gobwas/ws/util_unsafe.go b/vendor/github.com/gobwas/ws/util_unsafe.go new file mode 100644 index 00000000..b732297c --- /dev/null +++ b/vendor/github.com/gobwas/ws/util_unsafe.go @@ -0,0 +1,22 @@ +//go:build !purego +// +build !purego + +package ws + +import ( + "reflect" + "unsafe" +) + +func strToBytes(str string) (bts []byte) { + s := (*reflect.StringHeader)(unsafe.Pointer(&str)) + b := (*reflect.SliceHeader)(unsafe.Pointer(&bts)) + b.Data = s.Data + b.Len = s.Len + b.Cap = s.Len + return bts +} + +func btsToString(bts []byte) (str string) { + return *(*string)(unsafe.Pointer(&bts)) +} diff --git a/vendor/github.com/gobwas/ws/wsutil/cipher.go b/vendor/github.com/gobwas/ws/wsutil/cipher.go index f234be73..bc25064f 100644 --- a/vendor/github.com/gobwas/ws/wsutil/cipher.go +++ b/vendor/github.com/gobwas/ws/wsutil/cipher.go @@ -34,7 +34,7 @@ func (c *CipherReader) Read(p []byte) (n int, err error) { n, err = c.r.Read(p) ws.Cipher(p[:n], c.mask, c.pos) c.pos += n - return + return n, err } // CipherWriter implements io.Writer that applies xor-cipher to the bytes @@ -68,5 +68,5 @@ func (c *CipherWriter) Write(p []byte) (n int, err error) { n, err = c.w.Write(cp) c.pos += n - return + return n, err } diff --git a/vendor/github.com/gobwas/ws/wsutil/dialer.go b/vendor/github.com/gobwas/ws/wsutil/dialer.go index 91c03d51..4f8788fb 100644 
--- a/vendor/github.com/gobwas/ws/wsutil/dialer.go +++ b/vendor/github.com/gobwas/ws/wsutil/dialer.go @@ -113,6 +113,7 @@ type rwConn struct { func (rwc rwConn) Read(p []byte) (int, error) { return rwc.r.Read(p) } + func (rwc rwConn) Write(p []byte) (int, error) { return rwc.w.Write(p) } diff --git a/vendor/github.com/gobwas/ws/wsutil/extenstion.go b/vendor/github.com/gobwas/ws/wsutil/extenstion.go new file mode 100644 index 00000000..6e1ebbf4 --- /dev/null +++ b/vendor/github.com/gobwas/ws/wsutil/extenstion.go @@ -0,0 +1,31 @@ +package wsutil + +import "github.com/gobwas/ws" + +// RecvExtension is an interface for clearing fragment header RSV bits. +type RecvExtension interface { + UnsetBits(ws.Header) (ws.Header, error) +} + +// RecvExtensionFunc is an adapter to allow the use of ordinary functions as +// RecvExtension. +type RecvExtensionFunc func(ws.Header) (ws.Header, error) + +// BitsRecv implements RecvExtension. +func (fn RecvExtensionFunc) UnsetBits(h ws.Header) (ws.Header, error) { + return fn(h) +} + +// SendExtension is an interface for setting fragment header RSV bits. +type SendExtension interface { + SetBits(ws.Header) (ws.Header, error) +} + +// SendExtensionFunc is an adapter to allow the use of ordinary functions as +// SendExtension. +type SendExtensionFunc func(ws.Header) (ws.Header, error) + +// BitsSend implements SendExtension. 
+func (fn SendExtensionFunc) SetBits(h ws.Header) (ws.Header, error) { + return fn(h) +} diff --git a/vendor/github.com/gobwas/ws/wsutil/handler.go b/vendor/github.com/gobwas/ws/wsutil/handler.go index abb7cb73..44fd360e 100644 --- a/vendor/github.com/gobwas/ws/wsutil/handler.go +++ b/vendor/github.com/gobwas/ws/wsutil/handler.go @@ -199,7 +199,7 @@ func (c ControlHandler) HandleClose(h ws.Header) error { if err != nil { return err } - if err = w.Flush(); err != nil { + if err := w.Flush(); err != nil { return err } return ClosedError{ diff --git a/vendor/github.com/gobwas/ws/wsutil/helper.go b/vendor/github.com/gobwas/ws/wsutil/helper.go index 001e9d9e..231760bc 100644 --- a/vendor/github.com/gobwas/ws/wsutil/helper.go +++ b/vendor/github.com/gobwas/ws/wsutil/helper.go @@ -64,14 +64,14 @@ func ReadMessage(r io.Reader, s ws.State, m []Message) ([]Message, error) { // ReadClientMessage reads next message from r, considering that caller // represents server side. -// It is a shortcut for ReadMessage(r, ws.StateServerSide, m) +// It is a shortcut for ReadMessage(r, ws.StateServerSide, m). func ReadClientMessage(r io.Reader, m []Message) ([]Message, error) { return ReadMessage(r, ws.StateServerSide, m) } // ReadServerMessage reads next message from r, considering that caller // represents client side. -// It is a shortcut for ReadMessage(r, ws.StateClientSide, m) +// It is a shortcut for ReadMessage(r, ws.StateClientSide, m). func ReadServerMessage(r io.Reader, m []Message) ([]Message, error) { return ReadMessage(r, ws.StateClientSide, m) } @@ -113,7 +113,7 @@ func ReadClientText(rw io.ReadWriter) ([]byte, error) { // It discards received text messages. // // Note this may handle and write control frames into the writer part of a given -// io.ReadWriter. +// io.ReadWriter. 
func ReadClientBinary(rw io.ReadWriter) ([]byte, error) { p, _, err := readData(rw, ws.StateServerSide, ws.OpBinary) return p, err @@ -133,7 +133,7 @@ func ReadServerData(rw io.ReadWriter) ([]byte, ws.OpCode, error) { // It discards received binary messages. // // Note this may handle and write control frames into the writer part of a given -// io.ReadWriter. +// io.ReadWriter. func ReadServerText(rw io.ReadWriter) ([]byte, error) { p, _, err := readData(rw, ws.StateClientSide, ws.OpText) return p, err diff --git a/vendor/github.com/gobwas/ws/wsutil/reader.go b/vendor/github.com/gobwas/ws/wsutil/reader.go index 5f64c632..ff2e5b63 100644 --- a/vendor/github.com/gobwas/ws/wsutil/reader.go +++ b/vendor/github.com/gobwas/ws/wsutil/reader.go @@ -12,6 +12,10 @@ import ( // preceding NextFrame() call. var ErrNoFrameAdvance = errors.New("no frame advance") +// ErrFrameTooLarge indicates that a message of length higher than +// MaxFrameSize was being read. +var ErrFrameTooLarge = errors.New("frame too large") + // FrameHandlerFunc handles parsed frame header and its body represented by // io.Reader. // @@ -37,7 +41,17 @@ type Reader struct { // bytes are not valid UTF-8 sequence, ErrInvalidUTF8 returned. CheckUTF8 bool - // TODO(gobwas): add max frame size limit here. + // Extensions is a list of negotiated extensions for reader Source. + // It is used to meet the specs and clear appropriate bits in fragment + // header RSV segment. + Extensions []RecvExtension + + // MaxFrameSize controls the maximum frame size in bytes + // that can be read. A message exceeding that size will return + // a ErrFrameTooLarge to the application. + // + // Not setting this field means there is no limit. 
+ MaxFrameSize int64 OnContinuation FrameHandlerFunc OnIntermediate FrameHandlerFunc @@ -97,12 +111,13 @@ func (r *Reader) Read(p []byte) (n int, err error) { n, err = r.frame.Read(p) if err != nil && err != io.EOF { - return + return n, err } if err == nil && r.raw.N != 0 { - return + return n, nil } + // EOF condition (either err is io.EOF or r.raw.N is zero). switch { case r.raw.N != 0: err = io.ErrUnexpectedEOF @@ -112,6 +127,8 @@ func (r *Reader) Read(p []byte) (n int, err error) { r.resetFragment() case r.CheckUTF8 && !r.utf8.Valid(): + // NOTE: check utf8 only when full message received, since partial + // reads may be invalid. n = r.utf8.Accepted() err = ErrInvalidUTF8 @@ -120,7 +137,7 @@ func (r *Reader) Read(p []byte) (n int, err error) { err = io.EOF } - return + return n, err } // Discard discards current message unread bytes. @@ -166,14 +183,29 @@ func (r *Reader) NextFrame() (hdr ws.Header, err error) { return hdr, err } + if n := r.MaxFrameSize; n > 0 && hdr.Length > n { + return hdr, ErrFrameTooLarge + } + // Save raw reader to use it on discarding frame without ciphering and // other streaming checks. - r.raw = io.LimitedReader{r.Source, hdr.Length} + r.raw = io.LimitedReader{ + R: r.Source, + N: hdr.Length, + } frame := io.Reader(&r.raw) if hdr.Masked { frame = NewCipherReader(frame, hdr.Mask) } + + for _, x := range r.Extensions { + hdr, err = x.UnsetBits(hdr) + if err != nil { + return hdr, err + } + } + if r.fragmented() { if hdr.OpCode.IsControl() { if cb := r.OnIntermediate; cb != nil { @@ -183,7 +215,7 @@ func (r *Reader) NextFrame() (hdr ws.Header, err error) { // Ensure that src is empty. 
_, err = io.Copy(ioutil.Discard, &r.raw) } - return + return hdr, err } } else { r.opCode = hdr.OpCode @@ -208,7 +240,7 @@ func (r *Reader) NextFrame() (hdr ws.Header, err error) { r.State = r.State.Set(ws.StateFragmented) } - return + return hdr, err } func (r *Reader) fragmented() bool { diff --git a/vendor/github.com/gobwas/ws/wsutil/utf8.go b/vendor/github.com/gobwas/ws/wsutil/utf8.go index d877be0b..b8dc7264 100644 --- a/vendor/github.com/gobwas/ws/wsutil/utf8.go +++ b/vendor/github.com/gobwas/ws/wsutil/utf8.go @@ -65,7 +65,7 @@ func (u *UTF8Reader) Read(p []byte) (n int, err error) { u.state, u.codep = s, c u.accepted = accepted - return + return n, err } // Valid checks current reader state. It returns true if all read bytes are diff --git a/vendor/github.com/gobwas/ws/wsutil/writer.go b/vendor/github.com/gobwas/ws/wsutil/writer.go index c76b0b42..6a837cf6 100644 --- a/vendor/github.com/gobwas/ws/wsutil/writer.go +++ b/vendor/github.com/gobwas/ws/wsutil/writer.go @@ -84,38 +84,6 @@ func (c *ControlWriter) Flush() error { return c.w.Flush() } -// Writer contains logic of buffering output data into a WebSocket fragments. -// It is much the same as bufio.Writer, except the thing that it works with -// WebSocket frames, not the raw data. -// -// Writer writes frames with specified OpCode. -// It uses ws.State to decide whether the output frames must be masked. -// -// Note that it does not check control frame size or other RFC rules. -// That is, it must be used with special care to write control frames without -// violation of RFC. You could use ControlWriter that wraps Writer and contains -// some guards for writing control frames. -// -// If an error occurs writing to a Writer, no more data will be accepted and -// all subsequent writes will return the error. -// After all data has been written, the client should call the Flush() method -// to guarantee all data has been forwarded to the underlying io.Writer. 
-type Writer struct { - dest io.Writer - - n int // Buffered bytes counter. - raw []byte // Raw representation of buffer, including reserved header bytes. - buf []byte // Writeable part of buffer, without reserved header bytes. - - op ws.OpCode - state ws.State - - dirty bool - fragmented bool - - err error -} - var writers = pool.New(128, 65536) // GetWriter tries to reuse Writer getting it from the pool. @@ -145,6 +113,58 @@ func PutWriter(w *Writer) { writers.Put(w, w.Size()) } +// Writer contains logic of buffering output data into a WebSocket fragments. +// It is much the same as bufio.Writer, except the thing that it works with +// WebSocket frames, not the raw data. +// +// Writer writes frames with specified OpCode. +// It uses ws.State to decide whether the output frames must be masked. +// +// Note that it does not check control frame size or other RFC rules. +// That is, it must be used with special care to write control frames without +// violation of RFC. You could use ControlWriter that wraps Writer and contains +// some guards for writing control frames. +// +// If an error occurs writing to a Writer, no more data will be accepted and +// all subsequent writes will return the error. +// +// After all data has been written, the client should call the Flush() method +// to guarantee all data has been forwarded to the underlying io.Writer. +type Writer struct { + // dest specifies a destination of buffer flushes. + dest io.Writer + + // op specifies the WebSocket operation code used in flushed frames. + op ws.OpCode + + // state specifies the state of the Writer. + state ws.State + + // extensions is a list of negotiated extensions for writer Dest. + // It is used to meet the specs and set appropriate bits in fragment + // header RSV segment. + extensions []SendExtension + + // noFlush reports whether buffer must grow instead of being flushed. + noFlush bool + + // Raw representation of the buffer, including reserved header bytes. 
+ raw []byte + + // Writeable part of buffer, without reserved header bytes. + // Resetting this to nil will not result in reallocation if raw is not nil. + // And vice versa: if buf is not nil, then Writer is assumed as ready and + // initialized. + buf []byte + + // Buffered bytes counter. + n int + + dirty bool + fseq int + err error +} + // NewWriter returns a new Writer whose buffer has the DefaultWriteBuffer size. func NewWriter(dest io.Writer, state ws.State, op ws.OpCode) *Writer { return NewWriterBufferSize(dest, state, op, 0) @@ -186,57 +206,63 @@ func NewWriterBufferSize(dest io.Writer, state ws.State, op ws.OpCode, n int) *W // // It panics if len(buf) is too small to fit header and payload data. func NewWriterBuffer(dest io.Writer, state ws.State, op ws.OpCode, buf []byte) *Writer { - offset := reserve(state, len(buf)) - if len(buf) <= offset { - panic("buffer too small") - } - - return &Writer{ + w := &Writer{ dest: dest, - raw: buf, - buf: buf[offset:], state: state, op: op, + raw: buf, } + w.initBuf() + return w } -func reserve(state ws.State, n int) (offset int) { - var mask int - if state.ClientSide() { - mask = 4 - } - - switch { - case n <= int(len7)+mask+2: - return mask + 2 - case n <= int(len16)+mask+4: - return mask + 4 - default: - return mask + 10 +func (w *Writer) initBuf() { + offset := reserve(w.state, len(w.raw)) + if len(w.raw) <= offset { + panic("wsutil: writer buffer is too small") } + w.buf = w.raw[offset:] } -// headerSize returns number of bytes needed to encode header of a frame with -// given state and length. -func headerSize(s ws.State, n int) int { - return ws.HeaderSize(ws.Header{ - Length: int64(n), - Masked: s.ClientSide(), - }) -} - -// Reset discards any buffered data, clears error, and resets w to have given -// state and write frames with given OpCode to dest. +// Reset resets Writer as it was created by New() methods. 
+// Note that Reset does reset extensions and other options was set after +// Writer initialization. func (w *Writer) Reset(dest io.Writer, state ws.State, op ws.OpCode) { - w.n = 0 - w.dirty = false - w.fragmented = false w.dest = dest w.state = state w.op = op + + w.initBuf() + + w.n = 0 + w.dirty = false + w.fseq = 0 + w.extensions = w.extensions[:0] + w.noFlush = false } -// Size returns the size of the underlying buffer in bytes. +// ResetOp is an quick version of Reset(). +// ResetOp does reset unwritten fragments and does not reset results of +// SetExtensions() or DisableFlush() methods. +func (w *Writer) ResetOp(op ws.OpCode) { + w.op = op + w.n = 0 + w.dirty = false + w.fseq = 0 +} + +// SetExtensions adds xs as extensions to be used during writes. +func (w *Writer) SetExtensions(xs ...SendExtension) { + w.extensions = xs +} + +// DisableFlush denies Writer to write fragments. +func (w *Writer) DisableFlush() { + w.noFlush = true +} + +// Size returns the size of the underlying buffer in bytes (not including +// WebSocket header bytes). func (w *Writer) Size() int { return len(w.buf) } @@ -263,6 +289,10 @@ func (w *Writer) Write(p []byte) (n int, err error) { var nn int for len(p) > w.Available() && w.err == nil { + if w.noFlush { + w.Grow(len(p)) + continue + } if w.Buffered() == 0 { // Large write, empty buffer. Write directly from p to avoid copy. // Trade off here is that we make additional Write() to underlying @@ -295,6 +325,55 @@ func (w *Writer) Write(p []byte) (n int, err error) { return n, w.err } +func ceilPowerOfTwo(n int) int { + n |= n >> 1 + n |= n >> 2 + n |= n >> 4 + n |= n >> 8 + n |= n >> 16 + n |= n >> 32 + n++ + return n +} + +// Grow grows Writer's internal buffer capacity to guarantee space for another +// n bytes of _payload_ -- that is, frame header is not included in n. +func (w *Writer) Grow(n int) { + // NOTE: we must respect the possibility of header reserved bytes grow. 
+ var ( + size = len(w.raw) + prevOffset = len(w.raw) - len(w.buf) + nextOffset = len(w.raw) - len(w.buf) + buffered = w.Buffered() + ) + for cap := size - nextOffset - buffered; cap < n; { + // This loop runs twice only at split cases, when reservation of raw + // buffer space for the header shrinks capacity of new buffer such that + // it still less than n. + // + // Loop is safe here because: + // - (offset + buffered + n) is greater than size, otherwise (cap < n) + // would be false: + // size = offset + buffered + freeSpace (cap) + // size' = offset + buffered + wantSpace (n) + // Since (cap < n) is true in the loop condition, size' is guaranteed + // to be greater => no infinite loop. + size = ceilPowerOfTwo(nextOffset + buffered + n) + nextOffset = reserve(w.state, size) + cap = size - nextOffset - buffered + } + if size < len(w.raw) { + panic("wsutil: buffer grow leads to its reduce") + } + if size == len(w.raw) { + return + } + p := make([]byte, size) + copy(p[nextOffset-prevOffset:], w.raw[:prevOffset+buffered]) + w.raw = p + w.buf = w.raw[nextOffset:] +} + // WriteThrough writes data bypassing the buffer. // Note that Writer's buffer must be empty before calling WriteThrough(). func (w *Writer) WriteThrough(p []byte) (n int, err error) { @@ -305,13 +384,37 @@ func (w *Writer) WriteThrough(p []byte) (n int, err error) { return 0, ErrNotEmpty } - w.err = writeFrame(w.dest, w.state, w.opCode(), false, p) + var frame ws.Frame + frame.Header = ws.Header{ + OpCode: w.opCode(), + Fin: false, + Length: int64(len(p)), + } + for _, x := range w.extensions { + frame.Header, err = x.SetBits(frame.Header) + if err != nil { + return 0, err + } + } + if w.state.ClientSide() { + // Should copy bytes to prevent corruption of caller data. 
+ payload := pbytes.GetLen(len(p)) + defer pbytes.Put(payload) + copy(payload, p) + + frame.Payload = payload + frame = ws.MaskFrameInPlace(frame) + } else { + frame.Payload = p + } + + w.err = ws.WriteFrame(w.dest, frame) if w.err == nil { n = len(p) } w.dirty = true - w.fragmented = true + w.fseq++ return n, w.err } @@ -321,7 +424,11 @@ func (w *Writer) ReadFrom(src io.Reader) (n int64, err error) { var nn int for err == nil { if w.Available() == 0 { - err = w.FlushFragment() + if w.noFlush { + w.Grow(w.Buffered()) // Twice bigger. + } else { + err = w.FlushFragment() + } continue } @@ -367,7 +474,7 @@ func (w *Writer) Flush() error { w.err = w.flushFragment(true) w.n = 0 w.dirty = false - w.fragmented = false + w.fseq = 0 return w.err } @@ -381,35 +488,49 @@ func (w *Writer) FlushFragment() error { w.err = w.flushFragment(false) w.n = 0 - w.fragmented = true + w.fseq++ return w.err } -func (w *Writer) flushFragment(fin bool) error { - frame := ws.NewFrame(w.opCode(), fin, w.buf[:w.n]) +func (w *Writer) flushFragment(fin bool) (err error) { + var ( + payload = w.buf[:w.n] + header = ws.Header{ + OpCode: w.opCode(), + Fin: fin, + Length: int64(len(payload)), + } + ) + for _, ext := range w.extensions { + header, err = ext.SetBits(header) + if err != nil { + return err + } + } if w.state.ClientSide() { - frame = ws.MaskFrameInPlace(frame) + header.Masked = true + header.Mask = ws.NewMask() + ws.Cipher(payload, header.Mask, 0) } - // Write header to the header segment of the raw buffer. - head := len(w.raw) - len(w.buf) - offset := head - ws.HeaderSize(frame.Header) + var ( + offset = len(w.raw) - len(w.buf) + skip = offset - ws.HeaderSize(header) + ) buf := bytesWriter{ - buf: w.raw[offset:head], + buf: w.raw[skip:offset], } - if err := ws.WriteHeader(&buf, frame.Header); err != nil { + if err := ws.WriteHeader(&buf, header); err != nil { // Must never be reached. 
panic("dump header error: " + err.Error()) } - - _, err := w.dest.Write(w.raw[offset : head+w.n]) - + _, err = w.dest.Write(w.raw[skip : offset+w.n]) return err } func (w *Writer) opCode() ws.OpCode { - if w.fragmented { + if w.fseq > 0 { return ws.OpContinuation } return w.op @@ -448,3 +569,31 @@ func writeFrame(w io.Writer, s ws.State, op ws.OpCode, fin bool, p []byte) error return ws.WriteFrame(w, frame) } + +// reserve calculates number of bytes need to be reserved for frame header. +// +// Note that instead of ws.HeaderSize() it does calculation based on the buffer +// size, not the payload size. +func reserve(state ws.State, n int) (offset int) { + var mask int + if state.ClientSide() { + mask = 4 + } + switch { + case n <= int(len7)+mask+2: + return mask + 2 + case n <= int(len16)+mask+4: + return mask + 4 + default: + return mask + 10 + } +} + +// headerSize returns number of bytes needed to encode header of a frame with +// given state and length. +func headerSize(s ws.State, n int) int { + return ws.HeaderSize(ws.Header{ + Length: int64(n), + Masked: s.ClientSide(), + }) +} diff --git a/vendor/github.com/gobwas/ws/wsutil/wsutil.go b/vendor/github.com/gobwas/ws/wsutil/wsutil.go index ffd43367..86211f3e 100644 --- a/vendor/github.com/gobwas/ws/wsutil/wsutil.go +++ b/vendor/github.com/gobwas/ws/wsutil/wsutil.go @@ -3,54 +3,54 @@ Package wsutil provides utilities for working with WebSocket protocol. Overview: - // Read masked text message from peer and check utf8 encoding. - header, err := ws.ReadHeader(conn) - if err != nil { - // handle err - } + // Read masked text message from peer and check utf8 encoding. + header, err := ws.ReadHeader(conn) + if err != nil { + // handle err + } - // Prepare to read payload. - r := io.LimitReader(conn, header.Length) - r = wsutil.NewCipherReader(r, header.Mask) - r = wsutil.NewUTF8Reader(r) + // Prepare to read payload. 
+ r := io.LimitReader(conn, header.Length) + r = wsutil.NewCipherReader(r, header.Mask) + r = wsutil.NewUTF8Reader(r) - payload, err := ioutil.ReadAll(r) - if err != nil { - // handle err - } + payload, err := ioutil.ReadAll(r) + if err != nil { + // handle err + } You could get the same behavior using just `wsutil.Reader`: - r := wsutil.Reader{ - Source: conn, - CheckUTF8: true, - } + r := wsutil.Reader{ + Source: conn, + CheckUTF8: true, + } - payload, err := ioutil.ReadAll(r) - if err != nil { - // handle err - } + payload, err := ioutil.ReadAll(r) + if err != nil { + // handle err + } Or even simplest: - payload, err := wsutil.ReadClientText(conn) - if err != nil { - // handle err - } + payload, err := wsutil.ReadClientText(conn) + if err != nil { + // handle err + } Package is also exports tools for buffered writing: - // Create buffered writer, that will buffer output bytes and send them as - // 128-length fragments (with exception on large writes, see the doc). - writer := wsutil.NewWriterSize(conn, ws.StateServerSide, ws.OpText, 128) + // Create buffered writer, that will buffer output bytes and send them as + // 128-length fragments (with exception on large writes, see the doc). + writer := wsutil.NewWriterSize(conn, ws.StateServerSide, ws.OpText, 128) - _, err := io.CopyN(writer, rand.Reader, 100) - if err == nil { - err = writer.Flush() - } - if err != nil { - // handle error - } + _, err := io.CopyN(writer, rand.Reader, 100) + if err == nil { + err = writer.Flush() + } + if err != nil { + // handle error + } For more utils and helpers see the documentation. 
*/ diff --git a/vendor/github.com/golang-collections/collections/LICENSE b/vendor/github.com/golang-collections/collections/LICENSE deleted file mode 100644 index 863a984d..00000000 --- a/vendor/github.com/golang-collections/collections/LICENSE +++ /dev/null @@ -1,20 +0,0 @@ -Copyright (c) 2012 Caleb Doxsey - -Permission is hereby granted, free of charge, to any person obtaining -a copy of this software and associated documentation files (the -"Software"), to deal in the Software without restriction, including -without limitation the rights to use, copy, modify, merge, publish, -distribute, sublicense, and/or sell copies of the Software, and to -permit persons to whom the Software is furnished to do so, subject to -the following conditions: - -The above copyright notice and this permission notice shall be included -in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF -MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. -IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY -CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, -TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE -SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/golang-collections/collections/queue/queue.go b/vendor/github.com/golang-collections/collections/queue/queue.go deleted file mode 100644 index ae173487..00000000 --- a/vendor/github.com/golang-collections/collections/queue/queue.go +++ /dev/null @@ -1,55 +0,0 @@ -package queue - -type ( - Queue struct { - start, end *node - length int - } - node struct { - value interface{} - next *node - } -) - -// Create a new queue -func New() *Queue { - return &Queue{nil,nil,0} -} -// Take the next item off the front of the queue -func (this *Queue) Dequeue() interface{} { - if this.length == 0 { - return nil - } - n := this.start - if this.length == 1 { - this.start = nil - this.end = nil - } else { - this.start = this.start.next - } - this.length-- - return n.value -} -// Put an item on the end of a queue -func (this *Queue) Enqueue(value interface{}) { - n := &node{value,nil} - if this.length == 0 { - this.start = n - this.end = n - } else { - this.end.next = n - this.end = n - } - this.length++ -} -// Return the number of items in the queue -func (this *Queue) Len() int { - return this.length -} -// Return the first item in the queue without removing it -func (this *Queue) Peek() interface{} { - if this.length == 0 { - return nil - } - return this.start.value -} diff --git a/vendor/github.com/google/pprof/profile/encode.go b/vendor/github.com/google/pprof/profile/encode.go index ab7f03ae..182c926b 100644 --- a/vendor/github.com/google/pprof/profile/encode.go +++ b/vendor/github.com/google/pprof/profile/encode.go @@ -17,6 +17,7 @@ package profile import ( "errors" "sort" + "strings" ) func (p *Profile) decoder() []decoder { @@ -183,12 +184,13 @@ var profileDecoder = []decoder{ // repeated Location location = 4 func(b *buffer, m message) error { x := new(Location) - x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer + x.Line = b.tmpLines[:0] // Use shared space temporarily pp := m.(*Profile) pp.Location = 
append(pp.Location, x) err := decodeMessage(b, x) - var tmp []Line - x.Line = append(tmp, x.Line...) // Shrink to allocated size + b.tmpLines = x.Line[:0] + // Copy to shrink size and detach from shared space. + x.Line = append([]Line(nil), x.Line...) return err }, // repeated Function function = 5 @@ -252,6 +254,14 @@ func (p *Profile) postDecode() error { } else { mappings[m.ID] = m } + + // If this a main linux kernel mapping with a relocation symbol suffix + // ("[kernel.kallsyms]_text"), extract said suffix. + // It is fairly hacky to handle at this level, but the alternatives appear even worse. + const prefix = "[kernel.kallsyms]" + if strings.HasPrefix(m.File, prefix) { + m.KernelRelocationSymbol = m.File[len(prefix):] + } } functions := make(map[uint64]*Function, len(p.Function)) @@ -298,41 +308,52 @@ func (p *Profile) postDecode() error { st.Unit, err = getString(p.stringTable, &st.unitX, err) } + // Pre-allocate space for all locations. + numLocations := 0 for _, s := range p.Sample { - labels := make(map[string][]string, len(s.labelX)) - numLabels := make(map[string][]int64, len(s.labelX)) - numUnits := make(map[string][]string, len(s.labelX)) - for _, l := range s.labelX { - var key, value string - key, err = getString(p.stringTable, &l.keyX, err) - if l.strX != 0 { - value, err = getString(p.stringTable, &l.strX, err) - labels[key] = append(labels[key], value) - } else if l.numX != 0 || l.unitX != 0 { - numValues := numLabels[key] - units := numUnits[key] - if l.unitX != 0 { - var unit string - unit, err = getString(p.stringTable, &l.unitX, err) - units = padStringArray(units, len(numValues)) - numUnits[key] = append(units, unit) - } - numLabels[key] = append(numLabels[key], l.numX) - } - } - if len(labels) > 0 { - s.Label = labels - } - if len(numLabels) > 0 { - s.NumLabel = numLabels - for key, units := range numUnits { - if len(units) > 0 { - numUnits[key] = padStringArray(units, len(numLabels[key])) + numLocations += len(s.locationIDX) + } + 
locBuffer := make([]*Location, numLocations) + + for _, s := range p.Sample { + if len(s.labelX) > 0 { + labels := make(map[string][]string, len(s.labelX)) + numLabels := make(map[string][]int64, len(s.labelX)) + numUnits := make(map[string][]string, len(s.labelX)) + for _, l := range s.labelX { + var key, value string + key, err = getString(p.stringTable, &l.keyX, err) + if l.strX != 0 { + value, err = getString(p.stringTable, &l.strX, err) + labels[key] = append(labels[key], value) + } else if l.numX != 0 || l.unitX != 0 { + numValues := numLabels[key] + units := numUnits[key] + if l.unitX != 0 { + var unit string + unit, err = getString(p.stringTable, &l.unitX, err) + units = padStringArray(units, len(numValues)) + numUnits[key] = append(units, unit) + } + numLabels[key] = append(numLabels[key], l.numX) } } - s.NumUnit = numUnits + if len(labels) > 0 { + s.Label = labels + } + if len(numLabels) > 0 { + s.NumLabel = numLabels + for key, units := range numUnits { + if len(units) > 0 { + numUnits[key] = padStringArray(units, len(numLabels[key])) + } + } + s.NumUnit = numUnits + } } - s.Location = make([]*Location, len(s.locationIDX)) + + s.Location = locBuffer[:len(s.locationIDX)] + locBuffer = locBuffer[len(s.locationIDX):] for i, lid := range s.locationIDX { if lid < uint64(len(locationIds)) { s.Location[i] = locationIds[lid] diff --git a/vendor/github.com/google/pprof/profile/filter.go b/vendor/github.com/google/pprof/profile/filter.go index ea8e66c6..c794b939 100644 --- a/vendor/github.com/google/pprof/profile/filter.go +++ b/vendor/github.com/google/pprof/profile/filter.go @@ -22,6 +22,10 @@ import "regexp" // samples where at least one frame matches focus but none match ignore. // Returns true is the corresponding regexp matched at least one sample. 
func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) { + if focus == nil && ignore == nil && hide == nil && show == nil { + fm = true // Missing focus implies a match + return + } focusOrIgnore := make(map[uint64]bool) hidden := make(map[uint64]bool) for _, l := range p.Location { diff --git a/vendor/github.com/google/pprof/profile/legacy_profile.go b/vendor/github.com/google/pprof/profile/legacy_profile.go index 0c8f3bb5..8d07fd6c 100644 --- a/vendor/github.com/google/pprof/profile/legacy_profile.go +++ b/vendor/github.com/google/pprof/profile/legacy_profile.go @@ -295,11 +295,12 @@ func get64b(b []byte) (uint64, []byte) { // // The general format for profilez samples is a sequence of words in // binary format. The first words are a header with the following data: -// 1st word -- 0 -// 2nd word -- 3 -// 3rd word -- 0 if a c++ application, 1 if a java application. -// 4th word -- Sampling period (in microseconds). -// 5th word -- Padding. +// +// 1st word -- 0 +// 2nd word -- 3 +// 3rd word -- 0 if a c++ application, 1 if a java application. +// 4th word -- Sampling period (in microseconds). +// 5th word -- Padding. func parseCPU(b []byte) (*Profile, error) { var parse func([]byte) (uint64, []byte) var n1, n2, n3, n4, n5 uint64 @@ -403,15 +404,18 @@ func cleanupDuplicateLocations(p *Profile) { // // profilez samples are a repeated sequence of stack frames of the // form: -// 1st word -- The number of times this stack was encountered. -// 2nd word -- The size of the stack (StackSize). -// 3rd word -- The first address on the stack. -// ... -// StackSize + 2 -- The last address on the stack +// +// 1st word -- The number of times this stack was encountered. +// 2nd word -- The size of the stack (StackSize). +// 3rd word -- The first address on the stack. +// ... 
+// StackSize + 2 -- The last address on the stack +// // The last stack trace is of the form: -// 1st word -- 0 -// 2nd word -- 1 -// 3rd word -- 0 +// +// 1st word -- 0 +// 2nd word -- 1 +// 3rd word -- 0 // // Addresses from stack traces may point to the next instruction after // each call. Optionally adjust by -1 to land somewhere on the actual @@ -861,7 +865,6 @@ func parseThread(b []byte) (*Profile, error) { // Recognize each thread and populate profile samples. for !isMemoryMapSentinel(line) { if strings.HasPrefix(line, "---- no stack trace for") { - line = "" break } if t := threadStartRE.FindStringSubmatch(line); len(t) != 4 { diff --git a/vendor/github.com/google/pprof/profile/merge.go b/vendor/github.com/google/pprof/profile/merge.go index 9978e733..4b66282c 100644 --- a/vendor/github.com/google/pprof/profile/merge.go +++ b/vendor/github.com/google/pprof/profile/merge.go @@ -15,6 +15,7 @@ package profile import ( + "encoding/binary" "fmt" "sort" "strconv" @@ -58,7 +59,7 @@ func Merge(srcs []*Profile) (*Profile, error) { for _, src := range srcs { // Clear the profile-specific hash tables - pm.locationsByID = make(map[uint64]*Location, len(src.Location)) + pm.locationsByID = makeLocationIDMap(len(src.Location)) pm.functionsByID = make(map[uint64]*Function, len(src.Function)) pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping)) @@ -136,7 +137,7 @@ type profileMerger struct { p *Profile // Memoization tables within a profile. - locationsByID map[uint64]*Location + locationsByID locationIDMap functionsByID map[uint64]*Function mappingsByID map[uint64]mapInfo @@ -153,6 +154,16 @@ type mapInfo struct { } func (pm *profileMerger) mapSample(src *Sample) *Sample { + // Check memoization table + k := pm.sampleKey(src) + if ss, ok := pm.samples[k]; ok { + for i, v := range src.Value { + ss.Value[i] += v + } + return ss + } + + // Make new sample. 
s := &Sample{ Location: make([]*Location, len(src.Location)), Value: make([]int64, len(src.Value)), @@ -177,52 +188,98 @@ func (pm *profileMerger) mapSample(src *Sample) *Sample { s.NumLabel[k] = vv s.NumUnit[k] = uu } - // Check memoization table. Must be done on the remapped location to - // account for the remapped mapping. Add current values to the - // existing sample. - k := s.key() - if ss, ok := pm.samples[k]; ok { - for i, v := range src.Value { - ss.Value[i] += v - } - return ss - } copy(s.Value, src.Value) pm.samples[k] = s pm.p.Sample = append(pm.p.Sample, s) return s } -// key generates sampleKey to be used as a key for maps. -func (sample *Sample) key() sampleKey { - ids := make([]string, len(sample.Location)) - for i, l := range sample.Location { - ids[i] = strconv.FormatUint(l.ID, 16) +func (pm *profileMerger) sampleKey(sample *Sample) sampleKey { + // Accumulate contents into a string. + var buf strings.Builder + buf.Grow(64) // Heuristic to avoid extra allocs + + // encode a number + putNumber := func(v uint64) { + var num [binary.MaxVarintLen64]byte + n := binary.PutUvarint(num[:], v) + buf.Write(num[:n]) } - labels := make([]string, 0, len(sample.Label)) - for k, v := range sample.Label { - labels = append(labels, fmt.Sprintf("%q%q", k, v)) + // encode a string prefixed with its length. + putDelimitedString := func(s string) { + putNumber(uint64(len(s))) + buf.WriteString(s) } - sort.Strings(labels) - numlabels := make([]string, 0, len(sample.NumLabel)) - for k, v := range sample.NumLabel { - numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k])) + for _, l := range sample.Location { + // Get the location in the merged profile, which may have a different ID. 
+ if loc := pm.mapLocation(l); loc != nil { + putNumber(loc.ID) + } } - sort.Strings(numlabels) + putNumber(0) // Delimiter - return sampleKey{ - strings.Join(ids, "|"), - strings.Join(labels, ""), - strings.Join(numlabels, ""), + for _, l := range sortedKeys1(sample.Label) { + putDelimitedString(l) + values := sample.Label[l] + putNumber(uint64(len(values))) + for _, v := range values { + putDelimitedString(v) + } } + + for _, l := range sortedKeys2(sample.NumLabel) { + putDelimitedString(l) + values := sample.NumLabel[l] + putNumber(uint64(len(values))) + for _, v := range values { + putNumber(uint64(v)) + } + units := sample.NumUnit[l] + putNumber(uint64(len(units))) + for _, v := range units { + putDelimitedString(v) + } + } + + return sampleKey(buf.String()) } -type sampleKey struct { - locations string - labels string - numlabels string +type sampleKey string + +// sortedKeys1 returns the sorted keys found in a string->[]string map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys2 and made into a generic function. +func sortedKeys1(m map[string][]string) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +// sortedKeys2 returns the sorted keys found in a string->[]int64 map. +// +// Note: this is currently non-generic since github pprof runs golint, +// which does not support generics. When that issue is fixed, it can +// be merged with sortedKeys1 and made into a generic function. 
+func sortedKeys2(m map[string][]int64) []string { + if len(m) == 0 { + return nil + } + keys := make([]string, 0, len(m)) + for k := range m { + keys = append(keys, k) + } + sort.Strings(keys) + return keys } func (pm *profileMerger) mapLocation(src *Location) *Location { @@ -230,7 +287,7 @@ func (pm *profileMerger) mapLocation(src *Location) *Location { return nil } - if l, ok := pm.locationsByID[src.ID]; ok { + if l := pm.locationsByID.get(src.ID); l != nil { return l } @@ -249,10 +306,10 @@ func (pm *profileMerger) mapLocation(src *Location) *Location { // account for the remapped mapping ID. k := l.key() if ll, ok := pm.locations[k]; ok { - pm.locationsByID[src.ID] = ll + pm.locationsByID.set(src.ID, ll) return ll } - pm.locationsByID[src.ID] = l + pm.locationsByID.set(src.ID, l) pm.locations[k] = l pm.p.Location = append(pm.p.Location, l) return l @@ -303,16 +360,17 @@ func (pm *profileMerger) mapMapping(src *Mapping) mapInfo { return mi } m := &Mapping{ - ID: uint64(len(pm.p.Mapping) + 1), - Start: src.Start, - Limit: src.Limit, - Offset: src.Offset, - File: src.File, - BuildID: src.BuildID, - HasFunctions: src.HasFunctions, - HasFilenames: src.HasFilenames, - HasLineNumbers: src.HasLineNumbers, - HasInlineFrames: src.HasInlineFrames, + ID: uint64(len(pm.p.Mapping) + 1), + Start: src.Start, + Limit: src.Limit, + Offset: src.Offset, + File: src.File, + KernelRelocationSymbol: src.KernelRelocationSymbol, + BuildID: src.BuildID, + HasFunctions: src.HasFunctions, + HasFilenames: src.HasFilenames, + HasLineNumbers: src.HasLineNumbers, + HasInlineFrames: src.HasInlineFrames, } pm.p.Mapping = append(pm.p.Mapping, m) @@ -479,3 +537,131 @@ func (p *Profile) compatible(pb *Profile) error { func equalValueType(st1, st2 *ValueType) bool { return st1.Type == st2.Type && st1.Unit == st2.Unit } + +// locationIDMap is like a map[uint64]*Location, but provides efficiency for +// ids that are densely numbered, which is often the case. 
+type locationIDMap struct { + dense []*Location // indexed by id for id < len(dense) + sparse map[uint64]*Location // indexed by id for id >= len(dense) +} + +func makeLocationIDMap(n int) locationIDMap { + return locationIDMap{ + dense: make([]*Location, n), + sparse: map[uint64]*Location{}, + } +} + +func (lm locationIDMap) get(id uint64) *Location { + if id < uint64(len(lm.dense)) { + return lm.dense[int(id)] + } + return lm.sparse[id] +} + +func (lm locationIDMap) set(id uint64, loc *Location) { + if id < uint64(len(lm.dense)) { + lm.dense[id] = loc + return + } + lm.sparse[id] = loc +} + +// CompatibilizeSampleTypes makes profiles compatible to be compared/merged. It +// keeps sample types that appear in all profiles only and drops/reorders the +// sample types as necessary. +// +// In the case of sample types order is not the same for given profiles the +// order is derived from the first profile. +// +// Profiles are modified in-place. +// +// It returns an error if the sample type's intersection is empty. +func CompatibilizeSampleTypes(ps []*Profile) error { + sTypes := commonSampleTypes(ps) + if len(sTypes) == 0 { + return fmt.Errorf("profiles have empty common sample type list") + } + for _, p := range ps { + if err := compatibilizeSampleTypes(p, sTypes); err != nil { + return err + } + } + return nil +} + +// commonSampleTypes returns sample types that appear in all profiles in the +// order how they ordered in the first profile. +func commonSampleTypes(ps []*Profile) []string { + if len(ps) == 0 { + return nil + } + sTypes := map[string]int{} + for _, p := range ps { + for _, st := range p.SampleType { + sTypes[st.Type]++ + } + } + var res []string + for _, st := range ps[0].SampleType { + if sTypes[st.Type] == len(ps) { + res = append(res, st.Type) + } + } + return res +} + +// compatibilizeSampleTypes drops sample types that are not present in sTypes +// list and reorder them if needed. 
+// +// It sets DefaultSampleType to sType[0] if it is not in sType list. +// +// It assumes that all sample types from the sTypes list are present in the +// given profile otherwise it returns an error. +func compatibilizeSampleTypes(p *Profile, sTypes []string) error { + if len(sTypes) == 0 { + return fmt.Errorf("sample type list is empty") + } + defaultSampleType := sTypes[0] + reMap, needToModify := make([]int, len(sTypes)), false + for i, st := range sTypes { + if st == p.DefaultSampleType { + defaultSampleType = p.DefaultSampleType + } + idx := searchValueType(p.SampleType, st) + if idx < 0 { + return fmt.Errorf("%q sample type is not found in profile", st) + } + reMap[i] = idx + if idx != i { + needToModify = true + } + } + if !needToModify && len(sTypes) == len(p.SampleType) { + return nil + } + p.DefaultSampleType = defaultSampleType + oldSampleTypes := p.SampleType + p.SampleType = make([]*ValueType, len(sTypes)) + for i, idx := range reMap { + p.SampleType[i] = oldSampleTypes[idx] + } + values := make([]int64, len(sTypes)) + for _, s := range p.Sample { + for i, idx := range reMap { + values[i] = s.Value[idx] + } + s.Value = s.Value[:len(values)] + copy(s.Value, values) + } + return nil +} + +func searchValueType(vts []*ValueType, s string) int { + for i, vt := range vts { + if vt.Type == s { + return i + } + } + return -1 +} diff --git a/vendor/github.com/google/pprof/profile/profile.go b/vendor/github.com/google/pprof/profile/profile.go index 2590c8dd..60ef7e92 100644 --- a/vendor/github.com/google/pprof/profile/profile.go +++ b/vendor/github.com/google/pprof/profile/profile.go @@ -21,7 +21,6 @@ import ( "compress/gzip" "fmt" "io" - "io/ioutil" "math" "path/filepath" "regexp" @@ -73,9 +72,23 @@ type ValueType struct { type Sample struct { Location []*Location Value []int64 - Label map[string][]string + // Label is a per-label-key map to values for string labels. 
+ // + // In general, having multiple values for the given label key is strongly + // discouraged - see docs for the sample label field in profile.proto. The + // main reason this unlikely state is tracked here is to make the + // decoding->encoding roundtrip not lossy. But we expect that the value + // slices present in this map are always of length 1. + Label map[string][]string + // NumLabel is a per-label-key map to values for numeric labels. See a note + // above on handling multiple values for a label. NumLabel map[string][]int64 - NumUnit map[string][]string + // NumUnit is a per-label-key map to the unit names of corresponding numeric + // label values. The unit info may be missing even if the label is in + // NumLabel, see the docs in profile.proto for details. When the value is + // slice is present and not nil, its length must be equal to the length of + // the corresponding value slice in NumLabel. + NumUnit map[string][]string locationIDX []uint64 labelX []label @@ -106,6 +119,15 @@ type Mapping struct { fileX int64 buildIDX int64 + + // Name of the kernel relocation symbol ("_text" or "_stext"), extracted from File. + // For linux kernel mappings generated by some tools, correct symbolization depends + // on knowing which of the two possible relocation symbols was used for `Start`. + // This is given to us as a suffix in `File` (e.g. "[kernel.kallsyms]_stext"). + // + // Note, this public field is not persisted in the proto. For the purposes of + // copying / merging / hashing profiles, it is considered subsumed by `File`. + KernelRelocationSymbol string } // Location corresponds to Profile.Location @@ -144,7 +166,7 @@ type Function struct { // may be a gzip-compressed encoded protobuf or one of many legacy // profile formats which may be unsupported in the future. 
func Parse(r io.Reader) (*Profile, error) { - data, err := ioutil.ReadAll(r) + data, err := io.ReadAll(r) if err != nil { return nil, err } @@ -159,7 +181,7 @@ func ParseData(data []byte) (*Profile, error) { if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b { gz, err := gzip.NewReader(bytes.NewBuffer(data)) if err == nil { - data, err = ioutil.ReadAll(gz) + data, err = io.ReadAll(gz) } if err != nil { return nil, fmt.Errorf("decompressing profile: %v", err) @@ -707,6 +729,35 @@ func (s *Sample) HasLabel(key, value string) bool { return false } +// SetNumLabel sets the specified key to the specified value for all samples in the +// profile. "unit" is a slice that describes the units that each corresponding member +// of "values" is measured in (e.g. bytes or seconds). If there is no relevant +// unit for a given value, that member of "unit" should be the empty string. +// "unit" must either have the same length as "value", or be nil. +func (p *Profile) SetNumLabel(key string, value []int64, unit []string) { + for _, sample := range p.Sample { + if sample.NumLabel == nil { + sample.NumLabel = map[string][]int64{key: value} + } else { + sample.NumLabel[key] = value + } + if sample.NumUnit == nil { + sample.NumUnit = map[string][]string{key: unit} + } else { + sample.NumUnit[key] = unit + } + } +} + +// RemoveNumLabel removes all numerical labels associated with the specified key for all +// samples in the profile. +func (p *Profile) RemoveNumLabel(key string) { + for _, sample := range p.Sample { + delete(sample.NumLabel, key) + delete(sample.NumUnit, key) + } +} + // DiffBaseSample returns true if a sample belongs to the diff base and false // otherwise. 
func (s *Sample) DiffBaseSample() bool { diff --git a/vendor/github.com/google/pprof/profile/proto.go b/vendor/github.com/google/pprof/profile/proto.go index 539ad3ab..a15696ba 100644 --- a/vendor/github.com/google/pprof/profile/proto.go +++ b/vendor/github.com/google/pprof/profile/proto.go @@ -39,11 +39,12 @@ import ( ) type buffer struct { - field int // field tag - typ int // proto wire type code for field - u64 uint64 - data []byte - tmp [16]byte + field int // field tag + typ int // proto wire type code for field + u64 uint64 + data []byte + tmp [16]byte + tmpLines []Line // temporary storage used while decoding "repeated Line". } type decoder func(*buffer, message) error @@ -286,7 +287,6 @@ func decodeInt64s(b *buffer, x *[]int64) error { if b.typ == 2 { // Packed encoding data := b.data - tmp := make([]int64, 0, len(data)) // Maximally sized for len(data) > 0 { var u uint64 var err error @@ -294,9 +294,8 @@ func decodeInt64s(b *buffer, x *[]int64) error { if u, data, err = decodeVarint(data); err != nil { return err } - tmp = append(tmp, int64(u)) + *x = append(*x, int64(u)) } - *x = append(*x, tmp...) return nil } var i int64 @@ -319,7 +318,6 @@ func decodeUint64s(b *buffer, x *[]uint64) error { if b.typ == 2 { data := b.data // Packed encoding - tmp := make([]uint64, 0, len(data)) // Maximally sized for len(data) > 0 { var u uint64 var err error @@ -327,9 +325,8 @@ func decodeUint64s(b *buffer, x *[]uint64) error { if u, data, err = decodeVarint(data); err != nil { return err } - tmp = append(tmp, u) + *x = append(*x, u) } - *x = append(*x, tmp...) 
return nil } var u uint64 diff --git a/vendor/github.com/google/pprof/profile/prune.go b/vendor/github.com/google/pprof/profile/prune.go index 02d21a81..b2f9fd54 100644 --- a/vendor/github.com/google/pprof/profile/prune.go +++ b/vendor/github.com/google/pprof/profile/prune.go @@ -62,15 +62,31 @@ func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) { prune := make(map[uint64]bool) pruneBeneath := make(map[uint64]bool) + // simplifyFunc can be expensive, so cache results. + // Note that the same function name can be encountered many times due + // different lines and addresses in the same function. + pruneCache := map[string]bool{} // Map from function to whether or not to prune + pruneFromHere := func(s string) bool { + if r, ok := pruneCache[s]; ok { + return r + } + funcName := simplifyFunc(s) + if dropRx.MatchString(funcName) { + if keepRx == nil || !keepRx.MatchString(funcName) { + pruneCache[s] = true + return true + } + } + pruneCache[s] = false + return false + } + for _, loc := range p.Location { var i int for i = len(loc.Line) - 1; i >= 0; i-- { if fn := loc.Line[i].Function; fn != nil && fn.Name != "" { - funcName := simplifyFunc(fn.Name) - if dropRx.MatchString(funcName) { - if keepRx == nil || !keepRx.MatchString(funcName) { - break - } + if pruneFromHere(fn.Name) { + break } } } diff --git a/vendor/github.com/miekg/dns/LICENSE b/vendor/github.com/miekg/dns/LICENSE index 55f12ab7..852ab9ce 100644 --- a/vendor/github.com/miekg/dns/LICENSE +++ b/vendor/github.com/miekg/dns/LICENSE @@ -1,30 +1,29 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. +BSD 3-Clause License + +Copyright (c) 2009, The Go Authors. Extensions copyright (c) 2011, Miek Gieben. +All rights reserved. 
Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -As this is fork of the official Go code the same license applies. -Extensions of the original work are copyright (c) 2011 Miek Gieben diff --git a/vendor/github.com/miekg/dns/README.md b/vendor/github.com/miekg/dns/README.md index 5a799d88..e57d86af 100644 --- a/vendor/github.com/miekg/dns/README.md +++ b/vendor/github.com/miekg/dns/README.md @@ -77,6 +77,12 @@ A not-so-up-to-date-list-that-may-be-actually-current: * https://ping.sx/dig * https://fleetdeck.io/ * https://github.com/markdingo/autoreverse +* https://github.com/slackhq/nebula +* https://addr.tools/ +* https://dnscheck.tools/ +* https://github.com/egbakou/domainverifier +* https://github.com/semihalev/sdns +* https://github.com/wintbiit/NineDNS Send pull request if you want to be listed here. @@ -120,6 +126,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository. *all of them* * 103{4,5} - DNS standard +* 1183 - ISDN, X25 and other deprecated records * 1348 - NSAP record (removed the record) * 1982 - Serial Arithmetic * 1876 - LOC record @@ -140,6 +147,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository. 
* 340{1,2,3} - NAPTR record * 3445 - Limiting the scope of (DNS)KEY * 3597 - Unknown RRs +* 4025 - A Method for Storing IPsec Keying Material in DNS * 403{3,4,5} - DNSSEC + validation functions * 4255 - SSHFP record * 4343 - Case insensitivity @@ -175,6 +183,7 @@ Example programs can be found in the `github.com/miekg/exdns` repository. * 8080 - EdDSA for DNSSEC * 8499 - DNS Terminology * 8659 - DNS Certification Authority Authorization (CAA) Resource Record +* 8777 - DNS Reverse IP Automatic Multicast Tunneling (AMT) Discovery * 8914 - Extended DNS Errors * 8976 - Message Digest for DNS Zones (ZONEMD RR) diff --git a/vendor/github.com/miekg/dns/acceptfunc.go b/vendor/github.com/miekg/dns/acceptfunc.go index ac479db9..1a59a854 100644 --- a/vendor/github.com/miekg/dns/acceptfunc.go +++ b/vendor/github.com/miekg/dns/acceptfunc.go @@ -10,8 +10,6 @@ type MsgAcceptFunc func(dh Header) MsgAcceptAction // // * opcode isn't OpcodeQuery or OpcodeNotify // -// * Zero bit isn't zero -// // * does not have exactly 1 question in the question section // // * has more than 1 RR in the Answer section @@ -19,7 +17,6 @@ type MsgAcceptFunc func(dh Header) MsgAcceptAction // * has more than 0 RRs in the Authority section // // * has more than 2 RRs in the Additional section -// var DefaultMsgAcceptFunc MsgAcceptFunc = defaultMsgAcceptFunc // MsgAcceptAction represents the action to be taken. diff --git a/vendor/github.com/miekg/dns/client.go b/vendor/github.com/miekg/dns/client.go index 9aa65853..9549fa92 100644 --- a/vendor/github.com/miekg/dns/client.go +++ b/vendor/github.com/miekg/dns/client.go @@ -6,7 +6,6 @@ import ( "context" "crypto/tls" "encoding/binary" - "fmt" "io" "net" "strings" @@ -56,14 +55,20 @@ type Client struct { // Timeout is a cumulative timeout for dial, write and read, defaults to 0 (disabled) - overrides DialTimeout, ReadTimeout, // WriteTimeout when non-zero. 
Can be overridden with net.Dialer.Timeout (see Client.ExchangeWithDialer and // Client.Dialer) or context.Context.Deadline (see ExchangeContext) - Timeout time.Duration - DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero - ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero - WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero - TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) - TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations. - SingleInflight bool // if true suppress multiple outstanding queries for the same Qname, Qtype and Qclass - group singleflight + Timeout time.Duration + DialTimeout time.Duration // net.DialTimeout, defaults to 2 seconds, or net.Dialer.Timeout if expiring earlier - overridden by Timeout when that value is non-zero + ReadTimeout time.Duration // net.Conn.SetReadTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + WriteTimeout time.Duration // net.Conn.SetWriteTimeout value for connections, defaults to 2 seconds - overridden by Timeout when that value is non-zero + TsigSecret map[string]string // secret(s) for Tsig map[], zonename must be in canonical form (lowercase, fqdn, see RFC 4034 Section 6.2) + TsigProvider TsigProvider // An implementation of the TsigProvider interface. If defined it replaces TsigSecret and is used for all TSIG operations. 
+ + // SingleInflight previously serialised multiple concurrent queries for the + // same Qname, Qtype and Qclass to ensure only one would be in flight at a + // time. + // + // Deprecated: This is a no-op. Callers should implement their own in flight + // query caching if needed. See github.com/miekg/dns/issues/1449. + SingleInflight bool } // Exchange performs a synchronous UDP query. It sends the message m to the address @@ -106,7 +111,6 @@ func (c *Client) Dial(address string) (conn *Conn, err error) { } // DialContext connects to the address on the named network, with a context.Context. -// For TLS over TCP (DoT) the context isn't used yet. This will be enabled when Go 1.18 is released. func (c *Client) DialContext(ctx context.Context, address string) (conn *Conn, err error) { // create a new dialer with the appropriate timeout var d net.Dialer @@ -127,15 +131,11 @@ func (c *Client) DialContext(ctx context.Context, address string) (conn *Conn, e if useTLS { network = strings.TrimSuffix(network, "-tls") - // TODO(miekg): Enable after Go 1.18 is released, to be able to support two prev. releases. - /* - tlsDialer := tls.Dialer{ - NetDialer: &d, - Config: c.TLSConfig, - } - conn.Conn, err = tlsDialer.DialContext(ctx, network, address) - */ - conn.Conn, err = tls.DialWithDialer(&d, network, address, c.TLSConfig) + tlsDialer := tls.Dialer{ + NetDialer: &d, + Config: c.TLSConfig, + } + conn.Conn, err = tlsDialer.DialContext(ctx, network, address) } else { conn.Conn, err = d.DialContext(ctx, network, address) } @@ -183,33 +183,13 @@ func (c *Client) Exchange(m *Msg, address string) (r *Msg, rtt time.Duration, er // This allows users of the library to implement their own connection management, // as opposed to Exchange, which will always use new connections and incur the added overhead // that entails when using "tcp" and especially "tcp-tls" clients. 
-// -// When the singleflight is set for this client the context is _not_ forwarded to the (shared) exchange, to -// prevent one cancelation from canceling all outstanding requests. func (c *Client) ExchangeWithConn(m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) { - return c.exchangeWithConnContext(context.Background(), m, conn) + return c.ExchangeWithConnContext(context.Background(), m, conn) } -func (c *Client) exchangeWithConnContext(ctx context.Context, m *Msg, conn *Conn) (r *Msg, rtt time.Duration, err error) { - if !c.SingleInflight { - return c.exchangeContext(ctx, m, conn) - } - - q := m.Question[0] - key := fmt.Sprintf("%s:%d:%d", q.Name, q.Qtype, q.Qclass) - r, rtt, err, shared := c.group.Do(key, func() (*Msg, time.Duration, error) { - // When we're doing singleflight we don't want one context cancelation, cancel _all_ outstanding queries. - // Hence we ignore the context and use Background(). - return c.exchangeContext(context.Background(), m, conn) - }) - if r != nil && shared { - r = r.Copy() - } - - return r, rtt, err -} - -func (c *Client) exchangeContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) { +// ExchangeWithConnContext has the same behaviour as ExchangeWithConn and +// additionally obeys deadlines from the passed Context. +func (c *Client) ExchangeWithConnContext(ctx context.Context, m *Msg, co *Conn) (r *Msg, rtt time.Duration, err error) { opt := m.IsEdns0() // If EDNS0 is used use that for size. 
if opt != nil && opt.UDPSize() >= MinMsgSize { @@ -431,7 +411,6 @@ func ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, err error) // co.WriteMsg(m) // in, _ := co.ReadMsg() // co.Close() -// func ExchangeConn(c net.Conn, m *Msg) (r *Msg, err error) { println("dns: ExchangeConn: this function is deprecated") co := new(Conn) @@ -480,5 +459,5 @@ func (c *Client) ExchangeContext(ctx context.Context, m *Msg, a string) (r *Msg, } defer conn.Close() - return c.exchangeWithConnContext(ctx, m, conn) + return c.ExchangeWithConnContext(ctx, m, conn) } diff --git a/vendor/github.com/miekg/dns/clientconfig.go b/vendor/github.com/miekg/dns/clientconfig.go index e11b630d..d00ac62f 100644 --- a/vendor/github.com/miekg/dns/clientconfig.go +++ b/vendor/github.com/miekg/dns/clientconfig.go @@ -68,7 +68,7 @@ func ClientConfigFromReader(resolvconf io.Reader) (*ClientConfig, error) { } case "search": // set search path to given servers - c.Search = append([]string(nil), f[1:]...) + c.Search = cloneSlice(f[1:]) case "options": // magic options for _, s := range f[1:] { diff --git a/vendor/github.com/miekg/dns/defaults.go b/vendor/github.com/miekg/dns/defaults.go index f2cdbf43..02d9199a 100644 --- a/vendor/github.com/miekg/dns/defaults.go +++ b/vendor/github.com/miekg/dns/defaults.go @@ -22,8 +22,7 @@ func (dns *Msg) SetReply(request *Msg) *Msg { } dns.Rcode = RcodeSuccess if len(request.Question) > 0 { - dns.Question = make([]Question, 1) - dns.Question[0] = request.Question[0] + dns.Question = []Question{request.Question[0]} } return dns } @@ -208,7 +207,7 @@ func IsDomainName(s string) (labels int, ok bool) { } // check for \DDD - if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) { + if isDDD(s[i+1:]) { i += 3 begin += 3 } else { @@ -272,40 +271,39 @@ func IsMsg(buf []byte) error { // IsFqdn checks if a domain name is fully qualified. 
func IsFqdn(s string) bool { - s2 := strings.TrimSuffix(s, ".") - if s == s2 { + // Check for (and remove) a trailing dot, returning if there isn't one. + if s == "" || s[len(s)-1] != '.' { return false } + s = s[:len(s)-1] - i := strings.LastIndexFunc(s2, func(r rune) bool { + // If we don't have an escape sequence before the final dot, we know it's + // fully qualified and can return here. + if s == "" || s[len(s)-1] != '\\' { + return true + } + + // Otherwise we have to check if the dot is escaped or not by checking if + // there are an odd or even number of escape sequences before the dot. + i := strings.LastIndexFunc(s, func(r rune) bool { return r != '\\' }) - - // Test whether we have an even number of escape sequences before - // the dot or none. - return (len(s2)-i)%2 != 0 + return (len(s)-i)%2 != 0 } -// IsRRset checks if a set of RRs is a valid RRset as defined by RFC 2181. -// This means the RRs need to have the same type, name, and class. Returns true -// if the RR set is valid, otherwise false. +// IsRRset reports whether a set of RRs is a valid RRset as defined by RFC 2181. +// This means the RRs need to have the same type, name, and class. func IsRRset(rrset []RR) bool { if len(rrset) == 0 { return false } - if len(rrset) == 1 { - return true - } - rrHeader := rrset[0].Header() - rrType := rrHeader.Rrtype - rrClass := rrHeader.Class - rrName := rrHeader.Name + baseH := rrset[0].Header() for _, rr := range rrset[1:] { - curRRHeader := rr.Header() - if curRRHeader.Rrtype != rrType || curRRHeader.Class != rrClass || curRRHeader.Name != rrName { + curH := rr.Header() + if curH.Rrtype != baseH.Rrtype || curH.Class != baseH.Class || curH.Name != baseH.Name { // Mismatch between the records, so this is not a valid rrset for - //signing/verifying + // signing/verifying return false } } @@ -323,9 +321,15 @@ func Fqdn(s string) string { } // CanonicalName returns the domain name in canonical form. 
A name in canonical -// form is lowercase and fully qualified. See Section 6.2 in RFC 4034. +// form is lowercase and fully qualified. Only US-ASCII letters are affected. See +// Section 6.2 in RFC 4034. func CanonicalName(s string) string { - return strings.ToLower(Fqdn(s)) + return strings.Map(func(r rune) rune { + if r >= 'A' && r <= 'Z' { + r += 'a' - 'A' + } + return r + }, Fqdn(s)) } // Copied from the official Go code. diff --git a/vendor/github.com/miekg/dns/dnssec.go b/vendor/github.com/miekg/dns/dnssec.go index ea01aa81..1be87eae 100644 --- a/vendor/github.com/miekg/dns/dnssec.go +++ b/vendor/github.com/miekg/dns/dnssec.go @@ -128,10 +128,6 @@ type dnskeyWireFmt struct { /* Nothing is left out */ } -func divRoundUp(a, b int) int { - return (a + b - 1) / b -} - // KeyTag calculates the keytag (or key-id) of the DNSKEY. func (k *DNSKEY) KeyTag() uint16 { if k == nil { @@ -417,11 +413,11 @@ func (rr *RRSIG) Verify(k *DNSKEY, rrset []RR) error { return err } - sigbuf := rr.sigBuf() // Get the binary signature data - if rr.Algorithm == PRIVATEDNS { // PRIVATEOID - // TODO(miek) - // remove the domain name and assume its ours? - } + sigbuf := rr.sigBuf() // Get the binary signature data + // TODO(miek) + // remove the domain name and assume its ours? 
+ // if rr.Algorithm == PRIVATEDNS { // PRIVATEOID + // } h, cryptohash, err := hashFromAlgorithm(rr.Algorithm) if err != nil { diff --git a/vendor/github.com/miekg/dns/dnssec_keyscan.go b/vendor/github.com/miekg/dns/dnssec_keyscan.go index f7965816..9c9972db 100644 --- a/vendor/github.com/miekg/dns/dnssec_keyscan.go +++ b/vendor/github.com/miekg/dns/dnssec_keyscan.go @@ -37,7 +37,8 @@ func (k *DNSKEY) ReadPrivateKey(q io.Reader, file string) (crypto.PrivateKey, er return nil, ErrPrivKey } // TODO(mg): check if the pubkey matches the private key - algo, err := strconv.ParseUint(strings.SplitN(m["algorithm"], " ", 2)[0], 10, 8) + algoStr, _, _ := strings.Cut(m["algorithm"], " ") + algo, err := strconv.ParseUint(algoStr, 10, 8) if err != nil { return nil, ErrPrivKey } @@ -159,7 +160,7 @@ func parseKey(r io.Reader, file string) (map[string]string, error) { k = l.token case zValue: if k == "" { - return nil, &ParseError{file, "no private key seen", l} + return nil, &ParseError{file: file, err: "no private key seen", lex: l} } m[strings.ToLower(k)] = l.token diff --git a/vendor/github.com/miekg/dns/doc.go b/vendor/github.com/miekg/dns/doc.go index f00f5722..586ab691 100644 --- a/vendor/github.com/miekg/dns/doc.go +++ b/vendor/github.com/miekg/dns/doc.go @@ -13,28 +13,28 @@ names in a message will result in a packing failure. Resource records are native types. They are not stored in wire format. Basic usage pattern for creating a new resource record: - r := new(dns.MX) - r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600} - r.Preference = 10 - r.Mx = "mx.miek.nl." + r := new(dns.MX) + r.Hdr = dns.RR_Header{Name: "miek.nl.", Rrtype: dns.TypeMX, Class: dns.ClassINET, Ttl: 3600} + r.Preference = 10 + r.Mx = "mx.miek.nl." Or directly from a string: - mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") + mx, err := dns.NewRR("miek.nl. 3600 IN MX 10 mx.miek.nl.") Or when the default origin (.) 
and TTL (3600) and class (IN) suit you: - mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl") + mx, err := dns.NewRR("miek.nl MX 10 mx.miek.nl") Or even: - mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") + mx, err := dns.NewRR("$ORIGIN nl.\nmiek 1H IN MX 10 mx.miek") In the DNS messages are exchanged, these messages contain resource records (sets). Use pattern for creating a message: - m := new(dns.Msg) - m.SetQuestion("miek.nl.", dns.TypeMX) + m := new(dns.Msg) + m.SetQuestion("miek.nl.", dns.TypeMX) Or when not certain if the domain name is fully qualified: @@ -45,17 +45,17 @@ records for the miek.nl. zone. The following is slightly more verbose, but more flexible: - m1 := new(dns.Msg) - m1.Id = dns.Id() - m1.RecursionDesired = true - m1.Question = make([]dns.Question, 1) - m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} + m1 := new(dns.Msg) + m1.Id = dns.Id() + m1.RecursionDesired = true + m1.Question = make([]dns.Question, 1) + m1.Question[0] = dns.Question{"miek.nl.", dns.TypeMX, dns.ClassINET} After creating a message it can be sent. Basic use pattern for synchronous querying the DNS at a server configured on 127.0.0.1 and port 53: - c := new(dns.Client) - in, rtt, err := c.Exchange(m1, "127.0.0.1:53") + c := new(dns.Client) + in, rtt, err := c.Exchange(m1, "127.0.0.1:53") Suppressing multiple outstanding queries (with the same question, type and class) is as easy as setting: @@ -72,7 +72,7 @@ and port to use for the connection: Port: 12345, Zone: "", } - c.Dialer := &net.Dialer{ + c.Dialer = &net.Dialer{ Timeout: 200 * time.Millisecond, LocalAddr: &laddr, } @@ -96,7 +96,7 @@ the Answer section: // do something with t.Txt } -Domain Name and TXT Character String Representations +# Domain Name and TXT Character String Representations Both domain names and TXT character strings are converted to presentation form both when unpacked and when converted to strings. @@ -108,7 +108,7 @@ be escaped. 
Bytes below 32 and above 127 will be converted to \DDD form. For domain names, in addition to the above rules brackets, periods, spaces, semicolons and the at symbol are escaped. -DNSSEC +# DNSSEC DNSSEC (DNS Security Extension) adds a layer of security to the DNS. It uses public key cryptography to sign resource records. The public keys are stored in @@ -117,12 +117,12 @@ DNSKEY records and the signatures in RRSIG records. Requesting DNSSEC information for a zone is done by adding the DO (DNSSEC OK) bit to a request. - m := new(dns.Msg) - m.SetEdns0(4096, true) + m := new(dns.Msg) + m.SetEdns0(4096, true) Signature generation, signature verification and key generation are all supported. -DYNAMIC UPDATES +# DYNAMIC UPDATES Dynamic updates reuses the DNS message format, but renames three of the sections. Question is Zone, Answer is Prerequisite, Authority is Update, only @@ -133,30 +133,30 @@ certain resource records or names in a zone to specify if resource records should be added or removed. The table from RFC 2136 supplemented with the Go DNS function shows which functions exist to specify the prerequisites. 
- 3.2.4 - Table Of Metavalues Used In Prerequisite Section + 3.2.4 - Table Of Metavalues Used In Prerequisite Section - CLASS TYPE RDATA Meaning Function - -------------------------------------------------------------- - ANY ANY empty Name is in use dns.NameUsed - ANY rrset empty RRset exists (value indep) dns.RRsetUsed - NONE ANY empty Name is not in use dns.NameNotUsed - NONE rrset empty RRset does not exist dns.RRsetNotUsed - zone rrset rr RRset exists (value dep) dns.Used + CLASS TYPE RDATA Meaning Function + -------------------------------------------------------------- + ANY ANY empty Name is in use dns.NameUsed + ANY rrset empty RRset exists (value indep) dns.RRsetUsed + NONE ANY empty Name is not in use dns.NameNotUsed + NONE rrset empty RRset does not exist dns.RRsetNotUsed + zone rrset rr RRset exists (value dep) dns.Used The prerequisite section can also be left empty. If you have decided on the prerequisites you can tell what RRs should be added or deleted. The next table shows the options you have and what functions to call. - 3.4.2.6 - Table Of Metavalues Used In Update Section + 3.4.2.6 - Table Of Metavalues Used In Update Section - CLASS TYPE RDATA Meaning Function - --------------------------------------------------------------- - ANY ANY empty Delete all RRsets from name dns.RemoveName - ANY rrset empty Delete an RRset dns.RemoveRRset - NONE rrset rr Delete an RR from RRset dns.Remove - zone rrset rr Add to an RRset dns.Insert + CLASS TYPE RDATA Meaning Function + --------------------------------------------------------------- + ANY ANY empty Delete all RRsets from name dns.RemoveName + ANY rrset empty Delete an RRset dns.RemoveRRset + NONE rrset rr Delete an RR from RRset dns.Remove + zone rrset rr Add to an RRset dns.Insert -TRANSACTION SIGNATURE +# TRANSACTION SIGNATURE An TSIG or transaction signature adds a HMAC TSIG record to each message sent. The supported algorithms include: HmacSHA1, HmacSHA256 and HmacSHA512. 
@@ -239,7 +239,7 @@ Basic use pattern validating and replying to a message that has TSIG set. w.WriteMsg(m) } -PRIVATE RRS +# PRIVATE RRS RFC 6895 sets aside a range of type codes for private use. This range is 65,280 - 65,534 (0xFF00 - 0xFFFE). When experimenting with new Resource Records these @@ -248,7 +248,7 @@ can be used, before requesting an official type code from IANA. See https://miek.nl/2014/september/21/idn-and-private-rr-in-go-dns/ for more information. -EDNS0 +# EDNS0 EDNS0 is an extension mechanism for the DNS defined in RFC 2671 and updated by RFC 6891. It defines a new RR type, the OPT RR, which is then completely @@ -279,9 +279,9 @@ SIG(0) From RFC 2931: - SIG(0) provides protection for DNS transactions and requests .... - ... protection for glue records, DNS requests, protection for message headers - on requests and responses, and protection of the overall integrity of a response. + SIG(0) provides protection for DNS transactions and requests .... + ... protection for glue records, DNS requests, protection for message headers + on requests and responses, and protection of the overall integrity of a response. It works like TSIG, except that SIG(0) uses public key cryptography, instead of the shared secret approach in TSIG. 
Supported algorithms: ECDSAP256SHA256, diff --git a/vendor/github.com/miekg/dns/edns.go b/vendor/github.com/miekg/dns/edns.go index 14568c2e..1b58e8f0 100644 --- a/vendor/github.com/miekg/dns/edns.go +++ b/vendor/github.com/miekg/dns/edns.go @@ -78,7 +78,10 @@ func (rr *OPT) String() string { if rr.Do() { s += "flags: do; " } else { - s += "flags: ; " + s += "flags:; " + } + if rr.Hdr.Ttl&0x7FFF != 0 { + s += fmt.Sprintf("MBZ: 0x%04x, ", rr.Hdr.Ttl&0x7FFF) } s += "udp: " + strconv.Itoa(int(rr.UDPSize())) @@ -98,6 +101,8 @@ func (rr *OPT) String() string { s += "\n; SUBNET: " + o.String() case *EDNS0_COOKIE: s += "\n; COOKIE: " + o.String() + case *EDNS0_EXPIRE: + s += "\n; EXPIRE: " + o.String() case *EDNS0_TCP_KEEPALIVE: s += "\n; KEEPALIVE: " + o.String() case *EDNS0_UL: @@ -180,7 +185,7 @@ func (rr *OPT) Do() bool { // SetDo sets the DO (DNSSEC OK) bit. // If we pass an argument, set the DO bit to that value. -// It is possible to pass 2 or more arguments. Any arguments after the 1st is silently ignored. +// It is possible to pass 2 or more arguments, but they will be ignored. func (rr *OPT) SetDo(do ...bool) { if len(do) == 1 { if do[0] { @@ -258,7 +263,7 @@ func (e *EDNS0_NSID) copy() EDNS0 { return &EDNS0_NSID{e.Code, e.Nsid} // o.Hdr.Name = "." // o.Hdr.Rrtype = dns.TypeOPT // e := new(dns.EDNS0_SUBNET) -// e.Code = dns.EDNS0SUBNET +// e.Code = dns.EDNS0SUBNET // by default this is filled in through unpacking OPT packets (unpackDataOpt) // e.Family = 1 // 1 for IPv4 source address, 2 for IPv6 // e.SourceNetmask = 32 // 32 for IPV4, 128 for IPv6 // e.SourceScope = 0 @@ -503,6 +508,7 @@ func (e *EDNS0_LLQ) String() string { " " + strconv.FormatUint(uint64(e.LeaseLife), 10) return s } + func (e *EDNS0_LLQ) copy() EDNS0 { return &EDNS0_LLQ{e.Code, e.Version, e.Opcode, e.Error, e.Id, e.LeaseLife} } @@ -515,8 +521,8 @@ type EDNS0_DAU struct { // Option implements the EDNS0 interface. 
func (e *EDNS0_DAU) Option() uint16 { return EDNS0DAU } -func (e *EDNS0_DAU) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = b; return nil } +func (e *EDNS0_DAU) pack() ([]byte, error) { return cloneSlice(e.AlgCode), nil } +func (e *EDNS0_DAU) unpack(b []byte) error { e.AlgCode = cloneSlice(b); return nil } func (e *EDNS0_DAU) String() string { s := "" @@ -539,8 +545,8 @@ type EDNS0_DHU struct { // Option implements the EDNS0 interface. func (e *EDNS0_DHU) Option() uint16 { return EDNS0DHU } -func (e *EDNS0_DHU) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = b; return nil } +func (e *EDNS0_DHU) pack() ([]byte, error) { return cloneSlice(e.AlgCode), nil } +func (e *EDNS0_DHU) unpack(b []byte) error { e.AlgCode = cloneSlice(b); return nil } func (e *EDNS0_DHU) String() string { s := "" @@ -563,8 +569,8 @@ type EDNS0_N3U struct { // Option implements the EDNS0 interface. func (e *EDNS0_N3U) Option() uint16 { return EDNS0N3U } -func (e *EDNS0_N3U) pack() ([]byte, error) { return e.AlgCode, nil } -func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = b; return nil } +func (e *EDNS0_N3U) pack() ([]byte, error) { return cloneSlice(e.AlgCode), nil } +func (e *EDNS0_N3U) unpack(b []byte) error { e.AlgCode = cloneSlice(b); return nil } func (e *EDNS0_N3U) String() string { // Re-use the hash map @@ -641,30 +647,21 @@ type EDNS0_LOCAL struct { // Option implements the EDNS0 interface. 
func (e *EDNS0_LOCAL) Option() uint16 { return e.Code } + func (e *EDNS0_LOCAL) String() string { return strconv.FormatInt(int64(e.Code), 10) + ":0x" + hex.EncodeToString(e.Data) } + func (e *EDNS0_LOCAL) copy() EDNS0 { - b := make([]byte, len(e.Data)) - copy(b, e.Data) - return &EDNS0_LOCAL{e.Code, b} + return &EDNS0_LOCAL{e.Code, cloneSlice(e.Data)} } func (e *EDNS0_LOCAL) pack() ([]byte, error) { - b := make([]byte, len(e.Data)) - copied := copy(b, e.Data) - if copied != len(e.Data) { - return nil, ErrBuf - } - return b, nil + return cloneSlice(e.Data), nil } func (e *EDNS0_LOCAL) unpack(b []byte) error { - e.Data = make([]byte, len(b)) - copied := copy(e.Data, b) - if copied != len(b) { - return ErrBuf - } + e.Data = cloneSlice(b) return nil } @@ -727,14 +724,10 @@ type EDNS0_PADDING struct { // Option implements the EDNS0 interface. func (e *EDNS0_PADDING) Option() uint16 { return EDNS0PADDING } -func (e *EDNS0_PADDING) pack() ([]byte, error) { return e.Padding, nil } -func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = b; return nil } +func (e *EDNS0_PADDING) pack() ([]byte, error) { return cloneSlice(e.Padding), nil } +func (e *EDNS0_PADDING) unpack(b []byte) error { e.Padding = cloneSlice(b); return nil } func (e *EDNS0_PADDING) String() string { return fmt.Sprintf("%0X", e.Padding) } -func (e *EDNS0_PADDING) copy() EDNS0 { - b := make([]byte, len(e.Padding)) - copy(b, e.Padding) - return &EDNS0_PADDING{b} -} +func (e *EDNS0_PADDING) copy() EDNS0 { return &EDNS0_PADDING{cloneSlice(e.Padding)} } // Extended DNS Error Codes (RFC 8914). 
const ( @@ -821,7 +814,7 @@ func (e *EDNS0_EDE) String() string { func (e *EDNS0_EDE) pack() ([]byte, error) { b := make([]byte, 2+len(e.ExtraText)) binary.BigEndian.PutUint16(b[0:], e.InfoCode) - copy(b[2:], []byte(e.ExtraText)) + copy(b[2:], e.ExtraText) return b, nil } diff --git a/vendor/github.com/miekg/dns/fuzz.go b/vendor/github.com/miekg/dns/fuzz.go index 57410acd..505ae430 100644 --- a/vendor/github.com/miekg/dns/fuzz.go +++ b/vendor/github.com/miekg/dns/fuzz.go @@ -1,3 +1,4 @@ +//go:build fuzz // +build fuzz package dns diff --git a/vendor/github.com/miekg/dns/generate.go b/vendor/github.com/miekg/dns/generate.go index ac8df34d..a81d2bc5 100644 --- a/vendor/github.com/miekg/dns/generate.go +++ b/vendor/github.com/miekg/dns/generate.go @@ -35,17 +35,17 @@ func (zp *ZoneParser) generate(l lex) (RR, bool) { token = token[:i] } - sx := strings.SplitN(token, "-", 2) - if len(sx) != 2 { + startStr, endStr, ok := strings.Cut(token, "-") + if !ok { return zp.setParseError("bad start-stop in $GENERATE range", l) } - start, err := strconv.ParseInt(sx[0], 10, 64) + start, err := strconv.ParseInt(startStr, 10, 64) if err != nil { return zp.setParseError("bad start in $GENERATE range", l) } - end, err := strconv.ParseInt(sx[1], 10, 64) + end, err := strconv.ParseInt(endStr, 10, 64) if err != nil { return zp.setParseError("bad stop in $GENERATE range", l) } @@ -54,7 +54,7 @@ func (zp *ZoneParser) generate(l lex) (RR, bool) { } // _BLANK - l, ok := zp.c.Next() + l, ok = zp.c.Next() if !ok || l.value != zBlank { return zp.setParseError("garbage after $GENERATE range", l) } @@ -116,7 +116,7 @@ func (r *generateReader) parseError(msg string, end int) *ParseError { l.token = r.s[r.si-1 : end] l.column += r.si // l.column starts one zBLANK before r.s - return &ParseError{r.file, msg, l} + return &ParseError{file: r.file, err: msg, lex: l} } func (r *generateReader) Read(p []byte) (int, error) { @@ -211,15 +211,16 @@ func (r *generateReader) ReadByte() (byte, error) { func 
modToPrintf(s string) (string, int64, string) { // Modifier is { offset [ ,width [ ,base ] ] } - provide default // values for optional width and type, if necessary. - var offStr, widthStr, base string - switch xs := strings.Split(s, ","); len(xs) { - case 1: - offStr, widthStr, base = xs[0], "0", "d" - case 2: - offStr, widthStr, base = xs[0], xs[1], "d" - case 3: - offStr, widthStr, base = xs[0], xs[1], xs[2] - default: + offStr, s, ok0 := strings.Cut(s, ",") + widthStr, s, ok1 := strings.Cut(s, ",") + base, _, ok2 := strings.Cut(s, ",") + if !ok0 { + widthStr = "0" + } + if !ok1 { + base = "d" + } + if ok2 { return "", 0, "bad modifier in $GENERATE" } @@ -234,8 +235,8 @@ func modToPrintf(s string) (string, int64, string) { return "", 0, "bad offset in $GENERATE" } - width, err := strconv.ParseInt(widthStr, 10, 64) - if err != nil || width < 0 || width > 255 { + width, err := strconv.ParseUint(widthStr, 10, 8) + if err != nil { return "", 0, "bad width in $GENERATE" } diff --git a/vendor/github.com/miekg/dns/labels.go b/vendor/github.com/miekg/dns/labels.go index f9faacfe..cd498d2e 100644 --- a/vendor/github.com/miekg/dns/labels.go +++ b/vendor/github.com/miekg/dns/labels.go @@ -122,7 +122,7 @@ func Split(s string) []int { } // NextLabel returns the index of the start of the next label in the -// string s starting at offset. +// string s starting at offset. A negative offset will cause a panic. // The bool end is true when the end of the string has been reached. // Also see PrevLabel. 
func NextLabel(s string, offset int) (i int, end bool) { diff --git a/vendor/github.com/miekg/dns/listen_no_reuseport.go b/vendor/github.com/miekg/dns/listen_no_reuseport.go index b9201417..8cebb2f1 100644 --- a/vendor/github.com/miekg/dns/listen_no_reuseport.go +++ b/vendor/github.com/miekg/dns/listen_no_reuseport.go @@ -1,4 +1,5 @@ -// +build !go1.11 !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd package dns @@ -6,16 +7,18 @@ import "net" const supportsReusePort = false -func listenTCP(network, addr string, reuseport bool) (net.Listener, error) { - if reuseport { +func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, error) { + if reuseport || reuseaddr { // TODO(tmthrgd): return an error? } return net.Listen(network, addr) } -func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) { - if reuseport { +const supportsReuseAddr = false + +func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn, error) { + if reuseport || reuseaddr { // TODO(tmthrgd): return an error? 
} diff --git a/vendor/github.com/miekg/dns/listen_reuseport.go b/vendor/github.com/miekg/dns/listen_reuseport.go index fad195cf..41326f20 100644 --- a/vendor/github.com/miekg/dns/listen_reuseport.go +++ b/vendor/github.com/miekg/dns/listen_reuseport.go @@ -1,4 +1,4 @@ -// +build go1.11 +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd // +build aix darwin dragonfly freebsd linux netbsd openbsd package dns @@ -25,19 +25,41 @@ func reuseportControl(network, address string, c syscall.RawConn) error { return opErr } -func listenTCP(network, addr string, reuseport bool) (net.Listener, error) { +const supportsReuseAddr = true + +func reuseaddrControl(network, address string, c syscall.RawConn) error { + var opErr error + err := c.Control(func(fd uintptr) { + opErr = unix.SetsockoptInt(int(fd), unix.SOL_SOCKET, unix.SO_REUSEADDR, 1) + }) + if err != nil { + return err + } + + return opErr +} + +func listenTCP(network, addr string, reuseport, reuseaddr bool) (net.Listener, error) { var lc net.ListenConfig - if reuseport { + switch { + case reuseaddr && reuseport: + case reuseport: lc.Control = reuseportControl + case reuseaddr: + lc.Control = reuseaddrControl } return lc.Listen(context.Background(), network, addr) } -func listenUDP(network, addr string, reuseport bool) (net.PacketConn, error) { +func listenUDP(network, addr string, reuseport, reuseaddr bool) (net.PacketConn, error) { var lc net.ListenConfig - if reuseport { + switch { + case reuseaddr && reuseport: + case reuseport: lc.Control = reuseportControl + case reuseaddr: + lc.Control = reuseaddrControl } return lc.ListenPacket(context.Background(), network, addr) diff --git a/vendor/github.com/miekg/dns/msg.go b/vendor/github.com/miekg/dns/msg.go index 89ebb64a..8294d039 100644 --- a/vendor/github.com/miekg/dns/msg.go +++ b/vendor/github.com/miekg/dns/msg.go @@ -252,7 +252,7 @@ loop: } // check for \DDD - if i+3 < ls && isDigit(bs[i+1]) && isDigit(bs[i+2]) && isDigit(bs[i+3]) { + if 
isDDD(bs[i+1:]) { bs[i] = dddToByte(bs[i+1:]) copy(bs[i+1:ls-3], bs[i+4:]) ls -= 3 @@ -448,7 +448,7 @@ Loop: return string(s), off1, nil } -func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { +func packTxt(txt []string, msg []byte, offset int) (int, error) { if len(txt) == 0 { if offset >= len(msg) { return offset, ErrBuf @@ -458,10 +458,7 @@ func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { } var err error for _, s := range txt { - if len(s) > len(tmp) { - return offset, ErrBuf - } - offset, err = packTxtString(s, msg, offset, tmp) + offset, err = packTxtString(s, msg, offset) if err != nil { return offset, err } @@ -469,32 +466,30 @@ func packTxt(txt []string, msg []byte, offset int, tmp []byte) (int, error) { return offset, nil } -func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { +func packTxtString(s string, msg []byte, offset int) (int, error) { lenByteOffset := offset - if offset >= len(msg) || len(s) > len(tmp) { + if offset >= len(msg) || len(s) > 256*4+1 /* If all \DDD */ { return offset, ErrBuf } offset++ - bs := tmp[:len(s)] - copy(bs, s) - for i := 0; i < len(bs); i++ { + for i := 0; i < len(s); i++ { if len(msg) <= offset { return offset, ErrBuf } - if bs[i] == '\\' { + if s[i] == '\\' { i++ - if i == len(bs) { + if i == len(s) { break } // check for \DDD - if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - msg[offset] = dddToByte(bs[i:]) + if isDDD(s[i:]) { + msg[offset] = dddToByte(s[i:]) i += 2 } else { - msg[offset] = bs[i] + msg[offset] = s[i] } } else { - msg[offset] = bs[i] + msg[offset] = s[i] } offset++ } @@ -506,30 +501,28 @@ func packTxtString(s string, msg []byte, offset int, tmp []byte) (int, error) { return offset, nil } -func packOctetString(s string, msg []byte, offset int, tmp []byte) (int, error) { - if offset >= len(msg) || len(s) > len(tmp) { +func packOctetString(s string, msg []byte, offset int) (int, error) { + if 
offset >= len(msg) || len(s) > 256*4+1 { return offset, ErrBuf } - bs := tmp[:len(s)] - copy(bs, s) - for i := 0; i < len(bs); i++ { + for i := 0; i < len(s); i++ { if len(msg) <= offset { return offset, ErrBuf } - if bs[i] == '\\' { + if s[i] == '\\' { i++ - if i == len(bs) { + if i == len(s) { break } // check for \DDD - if i+2 < len(bs) && isDigit(bs[i]) && isDigit(bs[i+1]) && isDigit(bs[i+2]) { - msg[offset] = dddToByte(bs[i:]) + if isDDD(s[i:]) { + msg[offset] = dddToByte(s[i:]) i += 2 } else { - msg[offset] = bs[i] + msg[offset] = s[i] } } else { - msg[offset] = bs[i] + msg[offset] = s[i] } offset++ } @@ -551,12 +544,11 @@ func unpackTxt(msg []byte, off0 int) (ss []string, off int, err error) { // Helpers for dealing with escaped bytes func isDigit(b byte) bool { return b >= '0' && b <= '9' } -func dddToByte(s []byte) byte { - _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808 - return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) +func isDDD[T ~[]byte | ~string](s T) bool { + return len(s) >= 3 && isDigit(s[0]) && isDigit(s[1]) && isDigit(s[2]) } -func dddStringToByte(s string) byte { +func dddToByte[T ~[]byte | ~string](s T) byte { _ = s[2] // bounds check hint to compiler; see golang.org/issue/14808 return byte((s[0]-'0')*100 + (s[1]-'0')*10 + (s[2] - '0')) } @@ -680,9 +672,9 @@ func unpackRRslice(l int, msg []byte, off int) (dst1 []RR, off1 int, err error) // Convert a MsgHdr to a string, with dig-like headers: // -//;; opcode: QUERY, status: NOERROR, id: 48404 +// ;; opcode: QUERY, status: NOERROR, id: 48404 // -//;; flags: qr aa rd ra; +// ;; flags: qr aa rd ra; func (h *MsgHdr) String() string { if h == nil { return " MsgHdr" @@ -866,7 +858,7 @@ func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) { // The header counts might have been wrong so we need to update it dh.Nscount = uint16(len(dns.Ns)) if err == nil { - dns.Extra, off, err = unpackRRslice(int(dh.Arcount), msg, off) + dns.Extra, _, err = 
unpackRRslice(int(dh.Arcount), msg, off) } // The header counts might have been wrong so we need to update it dh.Arcount = uint16(len(dns.Extra)) @@ -876,11 +868,11 @@ func (dns *Msg) unpack(dh Header, msg []byte, off int) (err error) { dns.Rcode |= opt.ExtendedRcode() } - if off != len(msg) { - // TODO(miek) make this an error? - // use PackOpt to let people tell how detailed the error reporting should be? - // println("dns: extra bytes in dns packet", off, "<", len(msg)) - } + // TODO(miek) make this an error? + // use PackOpt to let people tell how detailed the error reporting should be? + // if off != len(msg) { + // // println("dns: extra bytes in dns packet", off, "<", len(msg)) + // } return err } @@ -902,23 +894,38 @@ func (dns *Msg) String() string { return " MsgHdr" } s := dns.MsgHdr.String() + " " - s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " - s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " - s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " - s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" + if dns.MsgHdr.Opcode == OpcodeUpdate { + s += "ZONE: " + strconv.Itoa(len(dns.Question)) + ", " + s += "PREREQ: " + strconv.Itoa(len(dns.Answer)) + ", " + s += "UPDATE: " + strconv.Itoa(len(dns.Ns)) + ", " + s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" + } else { + s += "QUERY: " + strconv.Itoa(len(dns.Question)) + ", " + s += "ANSWER: " + strconv.Itoa(len(dns.Answer)) + ", " + s += "AUTHORITY: " + strconv.Itoa(len(dns.Ns)) + ", " + s += "ADDITIONAL: " + strconv.Itoa(len(dns.Extra)) + "\n" + } opt := dns.IsEdns0() if opt != nil { // OPT PSEUDOSECTION s += opt.String() + "\n" } if len(dns.Question) > 0 { - s += "\n;; QUESTION SECTION:\n" + if dns.MsgHdr.Opcode == OpcodeUpdate { + s += "\n;; ZONE SECTION:\n" + } else { + s += "\n;; QUESTION SECTION:\n" + } for _, r := range dns.Question { s += r.String() + "\n" } } if len(dns.Answer) > 0 { - s += "\n;; ANSWER SECTION:\n" + if dns.MsgHdr.Opcode == OpcodeUpdate { + s += 
"\n;; PREREQUISITE SECTION:\n" + } else { + s += "\n;; ANSWER SECTION:\n" + } for _, r := range dns.Answer { if r != nil { s += r.String() + "\n" @@ -926,7 +933,11 @@ func (dns *Msg) String() string { } } if len(dns.Ns) > 0 { - s += "\n;; AUTHORITY SECTION:\n" + if dns.MsgHdr.Opcode == OpcodeUpdate { + s += "\n;; UPDATE SECTION:\n" + } else { + s += "\n;; AUTHORITY SECTION:\n" + } for _, r := range dns.Ns { if r != nil { s += r.String() + "\n" @@ -1024,7 +1035,7 @@ func escapedNameLen(s string) int { continue } - if i+3 < len(s) && isDigit(s[i+1]) && isDigit(s[i+2]) && isDigit(s[i+3]) { + if isDDD(s[i+1:]) { nameLen -= 3 i += 3 } else { @@ -1065,8 +1076,8 @@ func (dns *Msg) CopyTo(r1 *Msg) *Msg { r1.Compress = dns.Compress if len(dns.Question) > 0 { - r1.Question = make([]Question, len(dns.Question)) - copy(r1.Question, dns.Question) // TODO(miek): Question is an immutable value, ok to do a shallow-copy + // TODO(miek): Question is an immutable value, ok to do a shallow-copy + r1.Question = cloneSlice(dns.Question) } rrArr := make([]RR, len(dns.Answer)+len(dns.Ns)+len(dns.Extra)) diff --git a/vendor/github.com/miekg/dns/msg_helpers.go b/vendor/github.com/miekg/dns/msg_helpers.go index ea2035cd..acec21f7 100644 --- a/vendor/github.com/miekg/dns/msg_helpers.go +++ b/vendor/github.com/miekg/dns/msg_helpers.go @@ -20,9 +20,7 @@ func unpackDataA(msg []byte, off int) (net.IP, int, error) { if off+net.IPv4len > len(msg) { return nil, len(msg), &Error{err: "overflow unpacking a"} } - a := append(make(net.IP, 0, net.IPv4len), msg[off:off+net.IPv4len]...) 
- off += net.IPv4len - return a, off, nil + return cloneSlice(msg[off : off+net.IPv4len]), off + net.IPv4len, nil } func packDataA(a net.IP, msg []byte, off int) (int, error) { @@ -47,9 +45,7 @@ func unpackDataAAAA(msg []byte, off int) (net.IP, int, error) { if off+net.IPv6len > len(msg) { return nil, len(msg), &Error{err: "overflow unpacking aaaa"} } - aaaa := append(make(net.IP, 0, net.IPv6len), msg[off:off+net.IPv6len]...) - off += net.IPv6len - return aaaa, off, nil + return cloneSlice(msg[off : off+net.IPv6len]), off + net.IPv6len, nil } func packDataAAAA(aaaa net.IP, msg []byte, off int) (int, error) { @@ -299,8 +295,7 @@ func unpackString(msg []byte, off int) (string, int, error) { } func packString(s string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) - off, err := packTxtString(s, msg, off, txtTmp) + off, err := packTxtString(s, msg, off) if err != nil { return len(msg), err } @@ -402,8 +397,7 @@ func unpackStringTxt(msg []byte, off int) ([]string, int, error) { } func packStringTxt(s []string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) // If the whole string consists out of \DDD we need this many. 
- off, err := packTxt(s, msg, off, txtTmp) + off, err := packTxt(s, msg, off) if err != nil { return len(msg), err } @@ -412,29 +406,24 @@ func packStringTxt(s []string, msg []byte, off int) (int, error) { func unpackDataOpt(msg []byte, off int) ([]EDNS0, int, error) { var edns []EDNS0 -Option: - var code uint16 - if off+4 > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking opt"} + for off < len(msg) { + if off+4 > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + code := binary.BigEndian.Uint16(msg[off:]) + off += 2 + optlen := binary.BigEndian.Uint16(msg[off:]) + off += 2 + if off+int(optlen) > len(msg) { + return nil, len(msg), &Error{err: "overflow unpacking opt"} + } + opt := makeDataOpt(code) + if err := opt.unpack(msg[off : off+int(optlen)]); err != nil { + return nil, len(msg), err + } + edns = append(edns, opt) + off += int(optlen) } - code = binary.BigEndian.Uint16(msg[off:]) - off += 2 - optlen := binary.BigEndian.Uint16(msg[off:]) - off += 2 - if off+int(optlen) > len(msg) { - return nil, len(msg), &Error{err: "overflow unpacking opt"} - } - e := makeDataOpt(code) - if err := e.unpack(msg[off : off+int(optlen)]); err != nil { - return nil, len(msg), err - } - edns = append(edns, e) - off += int(optlen) - - if off < len(msg) { - goto Option - } - return edns, off, nil } @@ -463,8 +452,7 @@ func unpackStringOctet(msg []byte, off int) (string, int, error) { } func packStringOctet(s string, msg []byte, off int) (int, error) { - txtTmp := make([]byte, 256*4+1) - off, err := packOctetString(s, msg, off, txtTmp) + off, err := packOctetString(s, msg, off) if err != nil { return len(msg), err } @@ -625,7 +613,7 @@ func unpackDataSVCB(msg []byte, off int) ([]SVCBKeyValue, int, error) { } func packDataSVCB(pairs []SVCBKeyValue, msg []byte, off int) (int, error) { - pairs = append([]SVCBKeyValue(nil), pairs...) 
+ pairs = cloneSlice(pairs) sort.Slice(pairs, func(i, j int) bool { return pairs[i].Key() < pairs[j].Key() }) @@ -810,3 +798,37 @@ func unpackDataAplPrefix(msg []byte, off int) (APLPrefix, int, error) { Network: ipnet, }, off, nil } + +func unpackIPSECGateway(msg []byte, off int, gatewayType uint8) (net.IP, string, int, error) { + var retAddr net.IP + var retString string + var err error + + switch gatewayType { + case IPSECGatewayNone: // do nothing + case IPSECGatewayIPv4: + retAddr, off, err = unpackDataA(msg, off) + case IPSECGatewayIPv6: + retAddr, off, err = unpackDataAAAA(msg, off) + case IPSECGatewayHost: + retString, off, err = UnpackDomainName(msg, off) + } + + return retAddr, retString, off, err +} + +func packIPSECGateway(gatewayAddr net.IP, gatewayString string, msg []byte, off int, gatewayType uint8, compression compressionMap, compress bool) (int, error) { + var err error + + switch gatewayType { + case IPSECGatewayNone: // do nothing + case IPSECGatewayIPv4: + off, err = packDataA(gatewayAddr, msg, off) + case IPSECGatewayIPv6: + off, err = packDataAAAA(gatewayAddr, msg, off) + case IPSECGatewayHost: + off, err = packDomainName(gatewayString, msg, off, compression, compress) + } + + return off, err +} diff --git a/vendor/github.com/miekg/dns/privaterr.go b/vendor/github.com/miekg/dns/privaterr.go index d256b652..350ea5a4 100644 --- a/vendor/github.com/miekg/dns/privaterr.go +++ b/vendor/github.com/miekg/dns/privaterr.go @@ -84,7 +84,7 @@ Fetch: err := r.Data.Parse(text) if err != nil { - return &ParseError{"", err.Error(), l} + return &ParseError{wrappedErr: err, lex: l} } return nil diff --git a/vendor/github.com/miekg/dns/scan.go b/vendor/github.com/miekg/dns/scan.go index 57be9882..1f92ae42 100644 --- a/vendor/github.com/miekg/dns/scan.go +++ b/vendor/github.com/miekg/dns/scan.go @@ -4,19 +4,21 @@ import ( "bufio" "fmt" "io" + "io/fs" "os" + "path" "path/filepath" "strconv" "strings" ) -const maxTok = 2048 // Largest token we can return. 
+const maxTok = 512 // Token buffer start size, and growth size amount. // The maximum depth of $INCLUDE directives supported by the // ZoneParser API. const maxIncludeDepth = 7 -// Tokinize a RFC 1035 zone file. The tokenizer will normalize it: +// Tokenize a RFC 1035 zone file. The tokenizer will normalize it: // * Add ownernames if they are left blank; // * Suppress sequences of spaces; // * Make each RR fit on one line (_NEWLINE is send as last) @@ -64,20 +66,26 @@ const ( // ParseError is a parsing error. It contains the parse error and the location in the io.Reader // where the error occurred. type ParseError struct { - file string - err string - lex lex + file string + err string + wrappedErr error + lex lex } func (e *ParseError) Error() (s string) { if e.file != "" { s = e.file + ": " } + if e.err == "" && e.wrappedErr != nil { + e.err = e.wrappedErr.Error() + } s += "dns: " + e.err + ": " + strconv.QuoteToASCII(e.lex.token) + " at line: " + strconv.Itoa(e.lex.line) + ":" + strconv.Itoa(e.lex.column) return } +func (e *ParseError) Unwrap() error { return e.wrappedErr } + type lex struct { token string // text of the token err bool // when true, token text has lexer error @@ -168,8 +176,9 @@ type ZoneParser struct { // sub is used to parse $INCLUDE files and $GENERATE directives. // Next, by calling subNext, forwards the resulting RRs from this // sub parser to the calling code. - sub *ZoneParser - osFile *os.File + sub *ZoneParser + r io.Reader + fsys fs.FS includeDepth uint8 @@ -188,7 +197,7 @@ func NewZoneParser(r io.Reader, origin, file string) *ZoneParser { if origin != "" { origin = Fqdn(origin) if _, ok := IsDomainName(origin); !ok { - pe = &ParseError{file, "bad initial origin name", lex{}} + pe = &ParseError{file: file, err: "bad initial origin name"} } } @@ -220,6 +229,24 @@ func (zp *ZoneParser) SetIncludeAllowed(v bool) { zp.includeAllowed = v } +// SetIncludeFS provides an [fs.FS] to use when looking for the target of +// $INCLUDE directives. 
($INCLUDE must still be enabled separately by calling +// [ZoneParser.SetIncludeAllowed].) If fsys is nil, [os.Open] will be used. +// +// When fsys is an on-disk FS, the ability of $INCLUDE to reach files from +// outside its root directory depends upon the FS implementation. For +// instance, [os.DirFS] will refuse to open paths like "../../etc/passwd", +// however it will still follow links which may point anywhere on the system. +// +// FS paths are slash-separated on all systems, even Windows. $INCLUDE paths +// containing other characters such as backslash and colon may be accepted as +// valid, but those characters will never be interpreted by an FS +// implementation as path element separators. See [fs.ValidPath] for more +// details. +func (zp *ZoneParser) SetIncludeFS(fsys fs.FS) { + zp.fsys = fsys +} + // Err returns the first non-EOF error that was encountered by the // ZoneParser. func (zp *ZoneParser) Err() error { @@ -237,7 +264,7 @@ func (zp *ZoneParser) Err() error { } func (zp *ZoneParser) setParseError(err string, l lex) (RR, bool) { - zp.parseErr = &ParseError{zp.file, err, l} + zp.parseErr = &ParseError{file: zp.file, err: err, lex: l} return nil, false } @@ -260,9 +287,11 @@ func (zp *ZoneParser) subNext() (RR, bool) { return rr, true } - if zp.sub.osFile != nil { - zp.sub.osFile.Close() - zp.sub.osFile = nil + if zp.sub.r != nil { + if c, ok := zp.sub.r.(io.Closer); ok { + c.Close() + } + zp.sub.r = nil } if zp.sub.Err() != nil { @@ -402,24 +431,44 @@ func (zp *ZoneParser) Next() (RR, bool) { // Start with the new file includePath := l.token - if !filepath.IsAbs(includePath) { - includePath = filepath.Join(filepath.Dir(zp.file), includePath) - } - - r1, e1 := os.Open(includePath) - if e1 != nil { - var as string - if !filepath.IsAbs(l.token) { - as = fmt.Sprintf(" as `%s'", includePath) + var r1 io.Reader + var e1 error + if zp.fsys != nil { + // fs.FS always uses / as separator, even on Windows, so use + // path instead of filepath here: + 
if !path.IsAbs(includePath) { + includePath = path.Join(path.Dir(zp.file), includePath) } - msg := fmt.Sprintf("failed to open `%s'%s: %v", l.token, as, e1) - return zp.setParseError(msg, l) + // os.DirFS, and probably others, expect all paths to be + // relative, so clean the path and remove leading / if + // present: + includePath = strings.TrimLeft(path.Clean(includePath), "/") + + r1, e1 = zp.fsys.Open(includePath) + } else { + if !filepath.IsAbs(includePath) { + includePath = filepath.Join(filepath.Dir(zp.file), includePath) + } + r1, e1 = os.Open(includePath) + } + if e1 != nil { + var as string + if includePath != l.token { + as = fmt.Sprintf(" as `%s'", includePath) + } + zp.parseErr = &ParseError{ + file: zp.file, + wrappedErr: fmt.Errorf("failed to open `%s'%s: %w", l.token, as, e1), + lex: l, + } + return nil, false } zp.sub = NewZoneParser(r1, neworigin, includePath) - zp.sub.defttl, zp.sub.includeDepth, zp.sub.osFile = zp.defttl, zp.includeDepth+1, r1 + zp.sub.defttl, zp.sub.includeDepth, zp.sub.r = zp.defttl, zp.includeDepth+1, r1 zp.sub.SetIncludeAllowed(true) + zp.sub.SetIncludeFS(zp.fsys) return zp.subNext() case zExpectDirTTLBl: if l.value != zBlank { @@ -605,8 +654,6 @@ func (zp *ZoneParser) Next() (RR, bool) { if !isPrivate && zp.c.Peek().token == "" { // This is a dynamic update rr. - // TODO(tmthrgd): Previously slurpRemainder was only called - // for certain RR types, which may have been important. 
if err := slurpRemainder(zp.c); err != nil { return zp.setParseError(err.err, err.lex) } @@ -765,8 +812,8 @@ func (zl *zlexer) Next() (lex, bool) { } var ( - str [maxTok]byte // Hold string text - com [maxTok]byte // Hold comment text + str = make([]byte, maxTok) // Hold string text + com = make([]byte, maxTok) // Hold comment text stri int // Offset in str (0 means empty) comi int // Offset in com (0 means empty) @@ -785,14 +832,12 @@ func (zl *zlexer) Next() (lex, bool) { l.line, l.column = zl.line, zl.column if stri >= len(str) { - l.token = "token length insufficient for parsing" - l.err = true - return *l, true + // if buffer length is insufficient, increase it. + str = append(str[:], make([]byte, maxTok)...) } if comi >= len(com) { - l.token = "comment length insufficient for parsing" - l.err = true - return *l, true + // if buffer length is insufficient, increase it. + com = append(com[:], make([]byte, maxTok)...) } switch x { @@ -816,7 +861,7 @@ func (zl *zlexer) Next() (lex, bool) { if stri == 0 { // Space directly in the beginning, handled in the grammar } else if zl.owner { - // If we have a string and its the first, make it an owner + // If we have a string and it's the first, make it an owner l.value = zOwner l.token = string(str[:stri]) @@ -1218,42 +1263,34 @@ func stringToCm(token string) (e, m uint8, ok bool) { if token[len(token)-1] == 'M' || token[len(token)-1] == 'm' { token = token[0 : len(token)-1] } - s := strings.SplitN(token, ".", 2) - var meters, cmeters, val int - var err error - switch len(s) { - case 2: - if cmeters, err = strconv.Atoi(s[1]); err != nil { - return - } + + var ( + meters, cmeters, val int + err error + ) + mStr, cmStr, hasCM := strings.Cut(token, ".") + if hasCM { // There's no point in having more than 2 digits in this part, and would rather make the implementation complicated ('123' should be treated as '12'). // So we simply reject it. // We also make sure the first character is a digit to reject '+-' signs. 
- if len(s[1]) > 2 || s[1][0] < '0' || s[1][0] > '9' { + cmeters, err = strconv.Atoi(cmStr) + if err != nil || len(cmStr) > 2 || cmStr[0] < '0' || cmStr[0] > '9' { return } - if len(s[1]) == 1 { + if len(cmStr) == 1 { // 'nn.1' must be treated as 'nn-meters and 10cm, not 1cm. cmeters *= 10 } - if s[0] == "" { - // This will allow omitting the 'meter' part, like .01 (meaning 0.01m = 1cm). - break - } - fallthrough - case 1: - if meters, err = strconv.Atoi(s[0]); err != nil { - return - } - // RFC1876 states the max value is 90000000.00. The latter two conditions enforce it. - if s[0][0] < '0' || s[0][0] > '9' || meters > 90000000 || (meters == 90000000 && cmeters != 0) { - return - } - case 0: - // huh? - return 0, 0, false } - ok = true + // This slighly ugly condition will allow omitting the 'meter' part, like .01 (meaning 0.01m = 1cm). + if !hasCM || mStr != "" { + meters, err = strconv.Atoi(mStr) + // RFC1876 states the max value is 90000000.00. The latter two conditions enforce it. + if err != nil || mStr[0] < '0' || mStr[0] > '9' || meters > 90000000 || (meters == 90000000 && cmeters != 0) { + return + } + } + if meters > 0 { e = 2 val = meters @@ -1265,8 +1302,7 @@ func stringToCm(token string) (e, m uint8, ok bool) { e++ val /= 10 } - m = uint8(val) - return + return e, uint8(val), true } func toAbsoluteName(name, origin string) (absolute string, ok bool) { @@ -1339,12 +1375,12 @@ func slurpRemainder(c *zlexer) *ParseError { case zBlank: l, _ = c.Next() if l.value != zNewline && l.value != zEOF { - return &ParseError{"", "garbage after rdata", l} + return &ParseError{err: "garbage after rdata", lex: l} } case zNewline: case zEOF: default: - return &ParseError{"", "garbage after rdata", l} + return &ParseError{err: "garbage after rdata", lex: l} } return nil } @@ -1353,16 +1389,16 @@ func slurpRemainder(c *zlexer) *ParseError { // Used for NID and L64 record. 
func stringToNodeID(l lex) (uint64, *ParseError) { if len(l.token) < 19 { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + return 0, &ParseError{file: l.token, err: "bad NID/L64 NodeID/Locator64", lex: l} } // There must be three colons at fixes positions, if not its a parse error if l.token[4] != ':' && l.token[9] != ':' && l.token[14] != ':' { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + return 0, &ParseError{file: l.token, err: "bad NID/L64 NodeID/Locator64", lex: l} } s := l.token[0:4] + l.token[5:9] + l.token[10:14] + l.token[15:19] u, err := strconv.ParseUint(s, 16, 64) if err != nil { - return 0, &ParseError{l.token, "bad NID/L64 NodeID/Locator64", l} + return 0, &ParseError{file: l.token, err: "bad NID/L64 NodeID/Locator64", lex: l} } return u, nil } diff --git a/vendor/github.com/miekg/dns/scan_rr.go b/vendor/github.com/miekg/dns/scan_rr.go index e398484d..1a90c61f 100644 --- a/vendor/github.com/miekg/dns/scan_rr.go +++ b/vendor/github.com/miekg/dns/scan_rr.go @@ -1,8 +1,9 @@ package dns import ( - "bytes" "encoding/base64" + "errors" + "fmt" "net" "strconv" "strings" @@ -11,23 +12,23 @@ import ( // A remainder of the rdata with embedded spaces, return the parsed string (sans the spaces) // or an error func endingToString(c *zlexer, errstr string) (string, *ParseError) { - var buffer bytes.Buffer + var s strings.Builder l, _ := c.Next() // zString for l.value != zNewline && l.value != zEOF { if l.err { - return buffer.String(), &ParseError{"", errstr, l} + return s.String(), &ParseError{err: errstr, lex: l} } switch l.value { case zString: - buffer.WriteString(l.token) + s.WriteString(l.token) case zBlank: // Ok default: - return "", &ParseError{"", errstr, l} + return "", &ParseError{err: errstr, lex: l} } l, _ = c.Next() } - return buffer.String(), nil + return s.String(), nil } // A remainder of the rdata with embedded spaces, split on unquoted whitespace @@ -36,7 +37,7 @@ func endingToTxtSlice(c *zlexer, 
errstr string) ([]string, *ParseError) { // Get the remaining data until we see a zNewline l, _ := c.Next() if l.err { - return nil, &ParseError{"", errstr, l} + return nil, &ParseError{err: errstr, lex: l} } // Build the slice @@ -45,7 +46,7 @@ func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) { empty := false for l.value != zNewline && l.value != zEOF { if l.err { - return nil, &ParseError{"", errstr, l} + return nil, &ParseError{err: errstr, lex: l} } switch l.value { case zString: @@ -72,7 +73,7 @@ func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) { case zBlank: if quote { // zBlank can only be seen in between txt parts. - return nil, &ParseError{"", errstr, l} + return nil, &ParseError{err: errstr, lex: l} } case zQuote: if empty && quote { @@ -81,13 +82,13 @@ func endingToTxtSlice(c *zlexer, errstr string) ([]string, *ParseError) { quote = !quote empty = true default: - return nil, &ParseError{"", errstr, l} + return nil, &ParseError{err: errstr, lex: l} } l, _ = c.Next() } if quote { - return nil, &ParseError{"", errstr, l} + return nil, &ParseError{err: errstr, lex: l} } return s, nil @@ -102,7 +103,7 @@ func (rr *A) parse(c *zlexer, o string) *ParseError { // IPv4. isIPv4 := !strings.Contains(l.token, ":") if rr.A == nil || !isIPv4 || l.err { - return &ParseError{"", "bad A A", l} + return &ParseError{err: "bad A A", lex: l} } return slurpRemainder(c) } @@ -114,7 +115,7 @@ func (rr *AAAA) parse(c *zlexer, o string) *ParseError { // addresses cannot include ":". 
isIPv6 := strings.Contains(l.token, ":") if rr.AAAA == nil || !isIPv6 || l.err { - return &ParseError{"", "bad AAAA AAAA", l} + return &ParseError{err: "bad AAAA AAAA", lex: l} } return slurpRemainder(c) } @@ -123,7 +124,7 @@ func (rr *NS) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad NS Ns", l} + return &ParseError{err: "bad NS Ns", lex: l} } rr.Ns = name return slurpRemainder(c) @@ -133,7 +134,7 @@ func (rr *PTR) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad PTR Ptr", l} + return &ParseError{err: "bad PTR Ptr", lex: l} } rr.Ptr = name return slurpRemainder(c) @@ -143,7 +144,7 @@ func (rr *NSAPPTR) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad NSAP-PTR Ptr", l} + return &ParseError{err: "bad NSAP-PTR Ptr", lex: l} } rr.Ptr = name return slurpRemainder(c) @@ -153,7 +154,7 @@ func (rr *RP) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() mbox, mboxOk := toAbsoluteName(l.token, o) if l.err || !mboxOk { - return &ParseError{"", "bad RP Mbox", l} + return &ParseError{err: "bad RP Mbox", lex: l} } rr.Mbox = mbox @@ -163,7 +164,7 @@ func (rr *RP) parse(c *zlexer, o string) *ParseError { txt, txtOk := toAbsoluteName(l.token, o) if l.err || !txtOk { - return &ParseError{"", "bad RP Txt", l} + return &ParseError{err: "bad RP Txt", lex: l} } rr.Txt = txt @@ -174,7 +175,7 @@ func (rr *MR) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad MR Mr", l} + return &ParseError{err: "bad MR Mr", lex: l} } rr.Mr = name return slurpRemainder(c) @@ -184,7 +185,7 @@ func (rr *MB) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := 
toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad MB Mb", l} + return &ParseError{err: "bad MB Mb", lex: l} } rr.Mb = name return slurpRemainder(c) @@ -194,7 +195,7 @@ func (rr *MG) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad MG Mg", l} + return &ParseError{err: "bad MG Mg", lex: l} } rr.Mg = name return slurpRemainder(c) @@ -219,6 +220,29 @@ func (rr *HINFO) parse(c *zlexer, o string) *ParseError { rr.Cpu = chunks[0] rr.Os = strings.Join(chunks[1:], " ") + return nil +} + +// according to RFC 1183 the parsing is identical to HINFO, so just use that code. +func (rr *ISDN) parse(c *zlexer, o string) *ParseError { + chunks, e := endingToTxtSlice(c, "bad ISDN Fields") + if e != nil { + return e + } + + if ln := len(chunks); ln == 0 { + return nil + } else if ln == 1 { + // Can we split it? + if out := strings.Fields(chunks[0]); len(out) > 1 { + chunks = out + } else { + chunks = append(chunks, "") + } + } + + rr.Address = chunks[0] + rr.SubAddress = strings.Join(chunks[1:], " ") return nil } @@ -227,7 +251,7 @@ func (rr *MINFO) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() rmail, rmailOk := toAbsoluteName(l.token, o) if l.err || !rmailOk { - return &ParseError{"", "bad MINFO Rmail", l} + return &ParseError{err: "bad MINFO Rmail", lex: l} } rr.Rmail = rmail @@ -237,7 +261,7 @@ func (rr *MINFO) parse(c *zlexer, o string) *ParseError { email, emailOk := toAbsoluteName(l.token, o) if l.err || !emailOk { - return &ParseError{"", "bad MINFO Email", l} + return &ParseError{err: "bad MINFO Email", lex: l} } rr.Email = email @@ -248,7 +272,7 @@ func (rr *MF) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad MF Mf", l} + return &ParseError{err: "bad MF Mf", lex: l} } rr.Mf = name return slurpRemainder(c) @@ -258,7 +282,7 @@ func 
(rr *MD) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad MD Md", l} + return &ParseError{err: "bad MD Md", lex: l} } rr.Md = name return slurpRemainder(c) @@ -268,7 +292,7 @@ func (rr *MX) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad MX Pref", l} + return &ParseError{err: "bad MX Pref", lex: l} } rr.Preference = uint16(i) @@ -278,7 +302,7 @@ func (rr *MX) parse(c *zlexer, o string) *ParseError { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad MX Mx", l} + return &ParseError{err: "bad MX Mx", lex: l} } rr.Mx = name @@ -289,7 +313,7 @@ func (rr *RT) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil { - return &ParseError{"", "bad RT Preference", l} + return &ParseError{err: "bad RT Preference", lex: l} } rr.Preference = uint16(i) @@ -299,7 +323,7 @@ func (rr *RT) parse(c *zlexer, o string) *ParseError { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad RT Host", l} + return &ParseError{err: "bad RT Host", lex: l} } rr.Host = name @@ -310,7 +334,7 @@ func (rr *AFSDB) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad AFSDB Subtype", l} + return &ParseError{err: "bad AFSDB Subtype", lex: l} } rr.Subtype = uint16(i) @@ -320,7 +344,7 @@ func (rr *AFSDB) parse(c *zlexer, o string) *ParseError { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad AFSDB Hostname", l} + return &ParseError{err: "bad AFSDB Hostname", lex: l} } rr.Hostname = name return slurpRemainder(c) @@ -329,7 +353,7 @@ func (rr *AFSDB) parse(c *zlexer, o string) *ParseError { func (rr *X25) parse(c 
*zlexer, o string) *ParseError { l, _ := c.Next() if l.err { - return &ParseError{"", "bad X25 PSDNAddress", l} + return &ParseError{err: "bad X25 PSDNAddress", lex: l} } rr.PSDNAddress = l.token return slurpRemainder(c) @@ -339,7 +363,7 @@ func (rr *KX) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad KX Pref", l} + return &ParseError{err: "bad KX Pref", lex: l} } rr.Preference = uint16(i) @@ -349,7 +373,7 @@ func (rr *KX) parse(c *zlexer, o string) *ParseError { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad KX Exchanger", l} + return &ParseError{err: "bad KX Exchanger", lex: l} } rr.Exchanger = name return slurpRemainder(c) @@ -359,7 +383,7 @@ func (rr *CNAME) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad CNAME Target", l} + return &ParseError{err: "bad CNAME Target", lex: l} } rr.Target = name return slurpRemainder(c) @@ -369,7 +393,7 @@ func (rr *DNAME) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad DNAME Target", l} + return &ParseError{err: "bad DNAME Target", lex: l} } rr.Target = name return slurpRemainder(c) @@ -379,7 +403,7 @@ func (rr *SOA) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() ns, nsOk := toAbsoluteName(l.token, o) if l.err || !nsOk { - return &ParseError{"", "bad SOA Ns", l} + return &ParseError{err: "bad SOA Ns", lex: l} } rr.Ns = ns @@ -389,7 +413,7 @@ func (rr *SOA) parse(c *zlexer, o string) *ParseError { mbox, mboxOk := toAbsoluteName(l.token, o) if l.err || !mboxOk { - return &ParseError{"", "bad SOA Mbox", l} + return &ParseError{err: "bad SOA Mbox", lex: l} } rr.Mbox = mbox @@ -402,16 +426,16 @@ func (rr *SOA) parse(c *zlexer, o string) *ParseError { for i := 0; i 
< 5; i++ { l, _ = c.Next() if l.err { - return &ParseError{"", "bad SOA zone parameter", l} + return &ParseError{err: "bad SOA zone parameter", lex: l} } if j, err := strconv.ParseUint(l.token, 10, 32); err != nil { if i == 0 { // Serial must be a number - return &ParseError{"", "bad SOA zone parameter", l} + return &ParseError{err: "bad SOA zone parameter", lex: l} } // We allow other fields to be unitful duration strings if v, ok = stringToTTL(l.token); !ok { - return &ParseError{"", "bad SOA zone parameter", l} + return &ParseError{err: "bad SOA zone parameter", lex: l} } } else { @@ -441,7 +465,7 @@ func (rr *SRV) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad SRV Priority", l} + return &ParseError{err: "bad SRV Priority", lex: l} } rr.Priority = uint16(i) @@ -449,7 +473,7 @@ func (rr *SRV) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() // zString i, e1 := strconv.ParseUint(l.token, 10, 16) if e1 != nil || l.err { - return &ParseError{"", "bad SRV Weight", l} + return &ParseError{err: "bad SRV Weight", lex: l} } rr.Weight = uint16(i) @@ -457,7 +481,7 @@ func (rr *SRV) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() // zString i, e2 := strconv.ParseUint(l.token, 10, 16) if e2 != nil || l.err { - return &ParseError{"", "bad SRV Port", l} + return &ParseError{err: "bad SRV Port", lex: l} } rr.Port = uint16(i) @@ -467,7 +491,7 @@ func (rr *SRV) parse(c *zlexer, o string) *ParseError { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad SRV Target", l} + return &ParseError{err: "bad SRV Target", lex: l} } rr.Target = name return slurpRemainder(c) @@ -477,7 +501,7 @@ func (rr *NAPTR) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad NAPTR Order", l} + return &ParseError{err: "bad NAPTR Order", lex: 
l} } rr.Order = uint16(i) @@ -485,7 +509,7 @@ func (rr *NAPTR) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() // zString i, e1 := strconv.ParseUint(l.token, 10, 16) if e1 != nil || l.err { - return &ParseError{"", "bad NAPTR Preference", l} + return &ParseError{err: "bad NAPTR Preference", lex: l} } rr.Preference = uint16(i) @@ -493,57 +517,57 @@ func (rr *NAPTR) parse(c *zlexer, o string) *ParseError { c.Next() // zBlank l, _ = c.Next() // _QUOTE if l.value != zQuote { - return &ParseError{"", "bad NAPTR Flags", l} + return &ParseError{err: "bad NAPTR Flags", lex: l} } l, _ = c.Next() // Either String or Quote if l.value == zString { rr.Flags = l.token l, _ = c.Next() // _QUOTE if l.value != zQuote { - return &ParseError{"", "bad NAPTR Flags", l} + return &ParseError{err: "bad NAPTR Flags", lex: l} } } else if l.value == zQuote { rr.Flags = "" } else { - return &ParseError{"", "bad NAPTR Flags", l} + return &ParseError{err: "bad NAPTR Flags", lex: l} } // Service c.Next() // zBlank l, _ = c.Next() // _QUOTE if l.value != zQuote { - return &ParseError{"", "bad NAPTR Service", l} + return &ParseError{err: "bad NAPTR Service", lex: l} } l, _ = c.Next() // Either String or Quote if l.value == zString { rr.Service = l.token l, _ = c.Next() // _QUOTE if l.value != zQuote { - return &ParseError{"", "bad NAPTR Service", l} + return &ParseError{err: "bad NAPTR Service", lex: l} } } else if l.value == zQuote { rr.Service = "" } else { - return &ParseError{"", "bad NAPTR Service", l} + return &ParseError{err: "bad NAPTR Service", lex: l} } // Regexp c.Next() // zBlank l, _ = c.Next() // _QUOTE if l.value != zQuote { - return &ParseError{"", "bad NAPTR Regexp", l} + return &ParseError{err: "bad NAPTR Regexp", lex: l} } l, _ = c.Next() // Either String or Quote if l.value == zString { rr.Regexp = l.token l, _ = c.Next() // _QUOTE if l.value != zQuote { - return &ParseError{"", "bad NAPTR Regexp", l} + return &ParseError{err: "bad NAPTR Regexp", lex: l} } } else if 
l.value == zQuote { rr.Regexp = "" } else { - return &ParseError{"", "bad NAPTR Regexp", l} + return &ParseError{err: "bad NAPTR Regexp", lex: l} } // After quote no space?? @@ -553,7 +577,7 @@ func (rr *NAPTR) parse(c *zlexer, o string) *ParseError { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad NAPTR Replacement", l} + return &ParseError{err: "bad NAPTR Replacement", lex: l} } rr.Replacement = name return slurpRemainder(c) @@ -563,7 +587,7 @@ func (rr *TALINK) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() previousName, previousNameOk := toAbsoluteName(l.token, o) if l.err || !previousNameOk { - return &ParseError{"", "bad TALINK PreviousName", l} + return &ParseError{err: "bad TALINK PreviousName", lex: l} } rr.PreviousName = previousName @@ -573,7 +597,7 @@ func (rr *TALINK) parse(c *zlexer, o string) *ParseError { nextName, nextNameOk := toAbsoluteName(l.token, o) if l.err || !nextNameOk { - return &ParseError{"", "bad TALINK NextName", l} + return &ParseError{err: "bad TALINK NextName", lex: l} } rr.NextName = nextName @@ -591,7 +615,7 @@ func (rr *LOC) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 32) if e != nil || l.err || i > 90 { - return &ParseError{"", "bad LOC Latitude", l} + return &ParseError{err: "bad LOC Latitude", lex: l} } rr.Latitude = 1000 * 60 * 60 * uint32(i) @@ -602,7 +626,7 @@ func (rr *LOC) parse(c *zlexer, o string) *ParseError { goto East } if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 59 { - return &ParseError{"", "bad LOC Latitude minutes", l} + return &ParseError{err: "bad LOC Latitude minutes", lex: l} } else { rr.Latitude += 1000 * 60 * uint32(i) } @@ -610,7 +634,7 @@ func (rr *LOC) parse(c *zlexer, o string) *ParseError { c.Next() // zBlank l, _ = c.Next() if i, err := strconv.ParseFloat(l.token, 64); err != nil || l.err || i < 0 || i >= 60 { - return &ParseError{"", "bad LOC Latitude 
seconds", l} + return &ParseError{err: "bad LOC Latitude seconds", lex: l} } else { rr.Latitude += uint32(1000 * i) } @@ -621,14 +645,14 @@ func (rr *LOC) parse(c *zlexer, o string) *ParseError { goto East } // If still alive, flag an error - return &ParseError{"", "bad LOC Latitude North/South", l} + return &ParseError{err: "bad LOC Latitude North/South", lex: l} East: // East c.Next() // zBlank l, _ = c.Next() if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 180 { - return &ParseError{"", "bad LOC Longitude", l} + return &ParseError{err: "bad LOC Longitude", lex: l} } else { rr.Longitude = 1000 * 60 * 60 * uint32(i) } @@ -639,14 +663,14 @@ East: goto Altitude } if i, err := strconv.ParseUint(l.token, 10, 32); err != nil || l.err || i > 59 { - return &ParseError{"", "bad LOC Longitude minutes", l} + return &ParseError{err: "bad LOC Longitude minutes", lex: l} } else { rr.Longitude += 1000 * 60 * uint32(i) } c.Next() // zBlank l, _ = c.Next() if i, err := strconv.ParseFloat(l.token, 64); err != nil || l.err || i < 0 || i >= 60 { - return &ParseError{"", "bad LOC Longitude seconds", l} + return &ParseError{err: "bad LOC Longitude seconds", lex: l} } else { rr.Longitude += uint32(1000 * i) } @@ -657,19 +681,19 @@ East: goto Altitude } // If still alive, flag an error - return &ParseError{"", "bad LOC Longitude East/West", l} + return &ParseError{err: "bad LOC Longitude East/West", lex: l} Altitude: c.Next() // zBlank l, _ = c.Next() if l.token == "" || l.err { - return &ParseError{"", "bad LOC Altitude", l} + return &ParseError{err: "bad LOC Altitude", lex: l} } if l.token[len(l.token)-1] == 'M' || l.token[len(l.token)-1] == 'm' { l.token = l.token[0 : len(l.token)-1] } if i, err := strconv.ParseFloat(l.token, 64); err != nil { - return &ParseError{"", "bad LOC Altitude", l} + return &ParseError{err: "bad LOC Altitude", lex: l} } else { rr.Altitude = uint32(i*100.0 + 10000000.0 + 0.5) } @@ -684,19 +708,19 @@ Altitude: case 0: // Size exp, 
m, ok := stringToCm(l.token) if !ok { - return &ParseError{"", "bad LOC Size", l} + return &ParseError{err: "bad LOC Size", lex: l} } rr.Size = exp&0x0f | m<<4&0xf0 case 1: // HorizPre exp, m, ok := stringToCm(l.token) if !ok { - return &ParseError{"", "bad LOC HorizPre", l} + return &ParseError{err: "bad LOC HorizPre", lex: l} } rr.HorizPre = exp&0x0f | m<<4&0xf0 case 2: // VertPre exp, m, ok := stringToCm(l.token) if !ok { - return &ParseError{"", "bad LOC VertPre", l} + return &ParseError{err: "bad LOC VertPre", lex: l} } rr.VertPre = exp&0x0f | m<<4&0xf0 } @@ -704,7 +728,7 @@ Altitude: case zBlank: // Ok default: - return &ParseError{"", "bad LOC Size, HorizPre or VertPre", l} + return &ParseError{err: "bad LOC Size, HorizPre or VertPre", lex: l} } l, _ = c.Next() } @@ -716,14 +740,14 @@ func (rr *HIP) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return &ParseError{"", "bad HIP PublicKeyAlgorithm", l} + return &ParseError{err: "bad HIP PublicKeyAlgorithm", lex: l} } rr.PublicKeyAlgorithm = uint8(i) c.Next() // zBlank l, _ = c.Next() // zString if l.token == "" || l.err { - return &ParseError{"", "bad HIP Hit", l} + return &ParseError{err: "bad HIP Hit", lex: l} } rr.Hit = l.token // This can not contain spaces, see RFC 5205 Section 6. 
rr.HitLength = uint8(len(rr.Hit)) / 2 @@ -731,12 +755,12 @@ func (rr *HIP) parse(c *zlexer, o string) *ParseError { c.Next() // zBlank l, _ = c.Next() // zString if l.token == "" || l.err { - return &ParseError{"", "bad HIP PublicKey", l} + return &ParseError{err: "bad HIP PublicKey", lex: l} } rr.PublicKey = l.token // This cannot contain spaces decodedPK, decodedPKerr := base64.StdEncoding.DecodeString(rr.PublicKey) if decodedPKerr != nil { - return &ParseError{"", "bad HIP PublicKey", l} + return &ParseError{err: "bad HIP PublicKey", lex: l} } rr.PublicKeyLength = uint16(len(decodedPK)) @@ -748,13 +772,13 @@ func (rr *HIP) parse(c *zlexer, o string) *ParseError { case zString: name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad HIP RendezvousServers", l} + return &ParseError{err: "bad HIP RendezvousServers", lex: l} } xs = append(xs, name) case zBlank: // Ok default: - return &ParseError{"", "bad HIP RendezvousServers", l} + return &ParseError{err: "bad HIP RendezvousServers", lex: l} } l, _ = c.Next() } @@ -768,7 +792,7 @@ func (rr *CERT) parse(c *zlexer, o string) *ParseError { if v, ok := StringToCertType[l.token]; ok { rr.Type = v } else if i, err := strconv.ParseUint(l.token, 10, 16); err != nil { - return &ParseError{"", "bad CERT Type", l} + return &ParseError{err: "bad CERT Type", lex: l} } else { rr.Type = uint16(i) } @@ -776,7 +800,7 @@ func (rr *CERT) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() // zString i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad CERT KeyTag", l} + return &ParseError{err: "bad CERT KeyTag", lex: l} } rr.KeyTag = uint16(i) c.Next() // zBlank @@ -784,7 +808,7 @@ func (rr *CERT) parse(c *zlexer, o string) *ParseError { if v, ok := StringToAlgorithm[l.token]; ok { rr.Algorithm = v } else if i, err := strconv.ParseUint(l.token, 10, 8); err != nil { - return &ParseError{"", "bad CERT Algorithm", l} + return &ParseError{err: "bad 
CERT Algorithm", lex: l} } else { rr.Algorithm = uint8(i) } @@ -810,7 +834,7 @@ func (rr *CSYNC) parse(c *zlexer, o string) *ParseError { j, e := strconv.ParseUint(l.token, 10, 32) if e != nil { // Serial must be a number - return &ParseError{"", "bad CSYNC serial", l} + return &ParseError{err: "bad CSYNC serial", lex: l} } rr.Serial = uint32(j) @@ -820,7 +844,7 @@ func (rr *CSYNC) parse(c *zlexer, o string) *ParseError { j, e1 := strconv.ParseUint(l.token, 10, 16) if e1 != nil { // Serial must be a number - return &ParseError{"", "bad CSYNC flags", l} + return &ParseError{err: "bad CSYNC flags", lex: l} } rr.Flags = uint16(j) @@ -838,12 +862,12 @@ func (rr *CSYNC) parse(c *zlexer, o string) *ParseError { tokenUpper := strings.ToUpper(l.token) if k, ok = StringToType[tokenUpper]; !ok { if k, ok = typeToInt(l.token); !ok { - return &ParseError{"", "bad CSYNC TypeBitMap", l} + return &ParseError{err: "bad CSYNC TypeBitMap", lex: l} } } rr.TypeBitMap = append(rr.TypeBitMap, k) default: - return &ParseError{"", "bad CSYNC TypeBitMap", l} + return &ParseError{err: "bad CSYNC TypeBitMap", lex: l} } l, _ = c.Next() } @@ -854,7 +878,7 @@ func (rr *ZONEMD) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 32) if e != nil || l.err { - return &ParseError{"", "bad ZONEMD Serial", l} + return &ParseError{err: "bad ZONEMD Serial", lex: l} } rr.Serial = uint32(i) @@ -862,7 +886,7 @@ func (rr *ZONEMD) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad ZONEMD Scheme", l} + return &ParseError{err: "bad ZONEMD Scheme", lex: l} } rr.Scheme = uint8(i) @@ -870,7 +894,7 @@ func (rr *ZONEMD) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() i, err := strconv.ParseUint(l.token, 10, 8) if err != nil || l.err { - return &ParseError{"", "bad ZONEMD Hash Algorithm", l} + return &ParseError{err: "bad ZONEMD Hash Algorithm", lex: l} } 
rr.Hash = uint8(i) @@ -891,11 +915,11 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { if strings.HasPrefix(tokenUpper, "TYPE") { t, ok = typeToInt(l.token) if !ok { - return &ParseError{"", "bad RRSIG Typecovered", l} + return &ParseError{err: "bad RRSIG Typecovered", lex: l} } rr.TypeCovered = t } else { - return &ParseError{"", "bad RRSIG Typecovered", l} + return &ParseError{err: "bad RRSIG Typecovered", lex: l} } } else { rr.TypeCovered = t @@ -903,17 +927,24 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { c.Next() // zBlank l, _ = c.Next() - i, e := strconv.ParseUint(l.token, 10, 8) - if e != nil || l.err { - return &ParseError{"", "bad RRSIG Algorithm", l} + if l.err { + return &ParseError{err: "bad RRSIG Algorithm", lex: l} + } + i, e := strconv.ParseUint(l.token, 10, 8) + rr.Algorithm = uint8(i) // if 0 we'll check the mnemonic in the if + if e != nil { + v, ok := StringToAlgorithm[l.token] + if !ok { + return &ParseError{err: "bad RRSIG Algorithm", lex: l} + } + rr.Algorithm = v } - rr.Algorithm = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad RRSIG Labels", l} + return &ParseError{err: "bad RRSIG Labels", lex: l} } rr.Labels = uint8(i) @@ -921,7 +952,7 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() i, e2 := strconv.ParseUint(l.token, 10, 32) if e2 != nil || l.err { - return &ParseError{"", "bad RRSIG OrigTtl", l} + return &ParseError{err: "bad RRSIG OrigTtl", lex: l} } rr.OrigTtl = uint32(i) @@ -932,7 +963,7 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { if i, err := strconv.ParseUint(l.token, 10, 32); err == nil { rr.Expiration = uint32(i) } else { - return &ParseError{"", "bad RRSIG Expiration", l} + return &ParseError{err: "bad RRSIG Expiration", lex: l} } } else { rr.Expiration = i @@ -944,7 +975,7 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { if i, err := 
strconv.ParseUint(l.token, 10, 32); err == nil { rr.Inception = uint32(i) } else { - return &ParseError{"", "bad RRSIG Inception", l} + return &ParseError{err: "bad RRSIG Inception", lex: l} } } else { rr.Inception = i @@ -954,7 +985,7 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() i, e3 := strconv.ParseUint(l.token, 10, 16) if e3 != nil || l.err { - return &ParseError{"", "bad RRSIG KeyTag", l} + return &ParseError{err: "bad RRSIG KeyTag", lex: l} } rr.KeyTag = uint16(i) @@ -963,7 +994,7 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { rr.SignerName = l.token name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad RRSIG SignerName", l} + return &ParseError{err: "bad RRSIG SignerName", lex: l} } rr.SignerName = name @@ -976,11 +1007,13 @@ func (rr *RRSIG) parse(c *zlexer, o string) *ParseError { return nil } +func (rr *NXT) parse(c *zlexer, o string) *ParseError { return rr.NSEC.parse(c, o) } + func (rr *NSEC) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad NSEC NextDomain", l} + return &ParseError{err: "bad NSEC NextDomain", lex: l} } rr.NextDomain = name @@ -998,12 +1031,12 @@ func (rr *NSEC) parse(c *zlexer, o string) *ParseError { tokenUpper := strings.ToUpper(l.token) if k, ok = StringToType[tokenUpper]; !ok { if k, ok = typeToInt(l.token); !ok { - return &ParseError{"", "bad NSEC TypeBitMap", l} + return &ParseError{err: "bad NSEC TypeBitMap", lex: l} } } rr.TypeBitMap = append(rr.TypeBitMap, k) default: - return &ParseError{"", "bad NSEC TypeBitMap", l} + return &ParseError{err: "bad NSEC TypeBitMap", lex: l} } l, _ = c.Next() } @@ -1014,27 +1047,27 @@ func (rr *NSEC3) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return &ParseError{"", "bad NSEC3 Hash", l} + return &ParseError{err: "bad NSEC3 
Hash", lex: l} } rr.Hash = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad NSEC3 Flags", l} + return &ParseError{err: "bad NSEC3 Flags", lex: l} } rr.Flags = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e2 := strconv.ParseUint(l.token, 10, 16) if e2 != nil || l.err { - return &ParseError{"", "bad NSEC3 Iterations", l} + return &ParseError{err: "bad NSEC3 Iterations", lex: l} } rr.Iterations = uint16(i) c.Next() l, _ = c.Next() if l.token == "" || l.err { - return &ParseError{"", "bad NSEC3 Salt", l} + return &ParseError{err: "bad NSEC3 Salt", lex: l} } if l.token != "-" { rr.SaltLength = uint8(len(l.token)) / 2 @@ -1044,7 +1077,7 @@ func (rr *NSEC3) parse(c *zlexer, o string) *ParseError { c.Next() l, _ = c.Next() if l.token == "" || l.err { - return &ParseError{"", "bad NSEC3 NextDomain", l} + return &ParseError{err: "bad NSEC3 NextDomain", lex: l} } rr.HashLength = 20 // Fix for NSEC3 (sha1 160 bits) rr.NextDomain = l.token @@ -1063,12 +1096,12 @@ func (rr *NSEC3) parse(c *zlexer, o string) *ParseError { tokenUpper := strings.ToUpper(l.token) if k, ok = StringToType[tokenUpper]; !ok { if k, ok = typeToInt(l.token); !ok { - return &ParseError{"", "bad NSEC3 TypeBitMap", l} + return &ParseError{err: "bad NSEC3 TypeBitMap", lex: l} } } rr.TypeBitMap = append(rr.TypeBitMap, k) default: - return &ParseError{"", "bad NSEC3 TypeBitMap", l} + return &ParseError{err: "bad NSEC3 TypeBitMap", lex: l} } l, _ = c.Next() } @@ -1079,21 +1112,21 @@ func (rr *NSEC3PARAM) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return &ParseError{"", "bad NSEC3PARAM Hash", l} + return &ParseError{err: "bad NSEC3PARAM Hash", lex: l} } rr.Hash = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad NSEC3PARAM Flags", l} + return 
&ParseError{err: "bad NSEC3PARAM Flags", lex: l} } rr.Flags = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e2 := strconv.ParseUint(l.token, 10, 16) if e2 != nil || l.err { - return &ParseError{"", "bad NSEC3PARAM Iterations", l} + return &ParseError{err: "bad NSEC3PARAM Iterations", lex: l} } rr.Iterations = uint16(i) c.Next() @@ -1108,7 +1141,7 @@ func (rr *NSEC3PARAM) parse(c *zlexer, o string) *ParseError { func (rr *EUI48) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() if len(l.token) != 17 || l.err { - return &ParseError{"", "bad EUI48 Address", l} + return &ParseError{err: "bad EUI48 Address", lex: l} } addr := make([]byte, 12) dash := 0 @@ -1117,7 +1150,7 @@ func (rr *EUI48) parse(c *zlexer, o string) *ParseError { addr[i+1] = l.token[i+1+dash] dash++ if l.token[i+1+dash] != '-' { - return &ParseError{"", "bad EUI48 Address", l} + return &ParseError{err: "bad EUI48 Address", lex: l} } } addr[10] = l.token[15] @@ -1125,7 +1158,7 @@ func (rr *EUI48) parse(c *zlexer, o string) *ParseError { i, e := strconv.ParseUint(string(addr), 16, 48) if e != nil { - return &ParseError{"", "bad EUI48 Address", l} + return &ParseError{err: "bad EUI48 Address", lex: l} } rr.Address = i return slurpRemainder(c) @@ -1134,7 +1167,7 @@ func (rr *EUI48) parse(c *zlexer, o string) *ParseError { func (rr *EUI64) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() if len(l.token) != 23 || l.err { - return &ParseError{"", "bad EUI64 Address", l} + return &ParseError{err: "bad EUI64 Address", lex: l} } addr := make([]byte, 16) dash := 0 @@ -1143,7 +1176,7 @@ func (rr *EUI64) parse(c *zlexer, o string) *ParseError { addr[i+1] = l.token[i+1+dash] dash++ if l.token[i+1+dash] != '-' { - return &ParseError{"", "bad EUI64 Address", l} + return &ParseError{err: "bad EUI64 Address", lex: l} } } addr[14] = l.token[21] @@ -1151,7 +1184,7 @@ func (rr *EUI64) parse(c *zlexer, o string) *ParseError { i, e := strconv.ParseUint(string(addr), 16, 64) if e != nil { - return 
&ParseError{"", "bad EUI68 Address", l} + return &ParseError{err: "bad EUI68 Address", lex: l} } rr.Address = i return slurpRemainder(c) @@ -1161,14 +1194,14 @@ func (rr *SSHFP) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return &ParseError{"", "bad SSHFP Algorithm", l} + return &ParseError{err: "bad SSHFP Algorithm", lex: l} } rr.Algorithm = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad SSHFP Type", l} + return &ParseError{err: "bad SSHFP Type", lex: l} } rr.Type = uint8(i) c.Next() // zBlank @@ -1184,21 +1217,21 @@ func (rr *DNSKEY) parseDNSKEY(c *zlexer, o, typ string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad " + typ + " Flags", l} + return &ParseError{err: "bad " + typ + " Flags", lex: l} } rr.Flags = uint16(i) c.Next() // zBlank l, _ = c.Next() // zString i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad " + typ + " Protocol", l} + return &ParseError{err: "bad " + typ + " Protocol", lex: l} } rr.Protocol = uint8(i) c.Next() // zBlank l, _ = c.Next() // zString i, e2 := strconv.ParseUint(l.token, 10, 8) if e2 != nil || l.err { - return &ParseError{"", "bad " + typ + " Algorithm", l} + return &ParseError{err: "bad " + typ + " Algorithm", lex: l} } rr.Algorithm = uint8(i) s, e3 := endingToString(c, "bad "+typ+" PublicKey") @@ -1216,25 +1249,136 @@ func (rr *DS) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, func (rr *DLV) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "DLV") } func (rr *CDS) parse(c *zlexer, o string) *ParseError { return rr.parseDS(c, o, "CDS") } +func (rr *IPSECKEY) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + num, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return 
&ParseError{err: "bad IPSECKEY value", lex: l} + } + rr.Precedence = uint8(num) + c.Next() // zBlank + + l, _ = c.Next() + num, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{err: "bad IPSECKEY value", lex: l} + } + rr.GatewayType = uint8(num) + c.Next() // zBlank + + l, _ = c.Next() + num, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{err: "bad IPSECKEY value", lex: l} + } + rr.Algorithm = uint8(num) + c.Next() // zBlank + + l, _ = c.Next() + if l.err { + return &ParseError{err: "bad IPSECKEY gateway", lex: l} + } + + rr.GatewayAddr, rr.GatewayHost, err = parseAddrHostUnion(l.token, o, rr.GatewayType) + if err != nil { + return &ParseError{wrappedErr: fmt.Errorf("IPSECKEY %w", err), lex: l} + } + + c.Next() // zBlank + + s, pErr := endingToString(c, "bad IPSECKEY PublicKey") + if pErr != nil { + return pErr + } + rr.PublicKey = s + return slurpRemainder(c) +} + +func (rr *AMTRELAY) parse(c *zlexer, o string) *ParseError { + l, _ := c.Next() + num, err := strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{err: "bad AMTRELAY value", lex: l} + } + rr.Precedence = uint8(num) + c.Next() // zBlank + + l, _ = c.Next() + if l.err || !(l.token == "0" || l.token == "1") { + return &ParseError{err: "bad discovery value", lex: l} + } + if l.token == "1" { + rr.GatewayType = 0x80 + } + + c.Next() // zBlank + + l, _ = c.Next() + num, err = strconv.ParseUint(l.token, 10, 8) + if err != nil || l.err { + return &ParseError{err: "bad AMTRELAY value", lex: l} + } + rr.GatewayType |= uint8(num) + c.Next() // zBlank + + l, _ = c.Next() + if l.err { + return &ParseError{err: "bad AMTRELAY gateway", lex: l} + } + + rr.GatewayAddr, rr.GatewayHost, err = parseAddrHostUnion(l.token, o, rr.GatewayType&0x7f) + if err != nil { + return &ParseError{wrappedErr: fmt.Errorf("AMTRELAY %w", err), lex: l} + } + + return slurpRemainder(c) +} + +// same constants and parsing between 
IPSECKEY and AMTRELAY +func parseAddrHostUnion(token, o string, gatewayType uint8) (addr net.IP, host string, err error) { + switch gatewayType { + case IPSECGatewayNone: + if token != "." { + return addr, host, errors.New("gateway type none with gateway set") + } + case IPSECGatewayIPv4, IPSECGatewayIPv6: + addr = net.ParseIP(token) + if addr == nil { + return addr, host, errors.New("gateway IP invalid") + } + if (addr.To4() == nil) == (gatewayType == IPSECGatewayIPv4) { + return addr, host, errors.New("gateway IP family mismatch") + } + case IPSECGatewayHost: + var ok bool + host, ok = toAbsoluteName(token, o) + if !ok { + return addr, host, errors.New("invalid gateway host") + } + } + + return addr, host, nil +} + func (rr *RKEY) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad RKEY Flags", l} + return &ParseError{err: "bad RKEY Flags", lex: l} } rr.Flags = uint16(i) c.Next() // zBlank l, _ = c.Next() // zString i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad RKEY Protocol", l} + return &ParseError{err: "bad RKEY Protocol", lex: l} } rr.Protocol = uint8(i) c.Next() // zBlank l, _ = c.Next() // zString i, e2 := strconv.ParseUint(l.token, 10, 8) if e2 != nil || l.err { - return &ParseError{"", "bad RKEY Algorithm", l} + return &ParseError{err: "bad RKEY Algorithm", lex: l} } rr.Algorithm = uint8(i) s, e3 := endingToString(c, "bad RKEY PublicKey") @@ -1267,21 +1411,21 @@ func (rr *GPOS) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() _, e := strconv.ParseFloat(l.token, 64) if e != nil || l.err { - return &ParseError{"", "bad GPOS Longitude", l} + return &ParseError{err: "bad GPOS Longitude", lex: l} } rr.Longitude = l.token c.Next() // zBlank l, _ = c.Next() _, e1 := strconv.ParseFloat(l.token, 64) if e1 != nil || l.err { - return &ParseError{"", "bad GPOS Latitude", l} + return &ParseError{err: "bad 
GPOS Latitude", lex: l} } rr.Latitude = l.token c.Next() // zBlank l, _ = c.Next() _, e2 := strconv.ParseFloat(l.token, 64) if e2 != nil || l.err { - return &ParseError{"", "bad GPOS Altitude", l} + return &ParseError{err: "bad GPOS Altitude", lex: l} } rr.Altitude = l.token return slurpRemainder(c) @@ -1291,7 +1435,7 @@ func (rr *DS) parseDS(c *zlexer, o, typ string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad " + typ + " KeyTag", l} + return &ParseError{err: "bad " + typ + " KeyTag", lex: l} } rr.KeyTag = uint16(i) c.Next() // zBlank @@ -1300,7 +1444,7 @@ func (rr *DS) parseDS(c *zlexer, o, typ string) *ParseError { tokenUpper := strings.ToUpper(l.token) i, ok := StringToAlgorithm[tokenUpper] if !ok || l.err { - return &ParseError{"", "bad " + typ + " Algorithm", l} + return &ParseError{err: "bad " + typ + " Algorithm", lex: l} } rr.Algorithm = i } else { @@ -1310,7 +1454,7 @@ func (rr *DS) parseDS(c *zlexer, o, typ string) *ParseError { l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad " + typ + " DigestType", l} + return &ParseError{err: "bad " + typ + " DigestType", lex: l} } rr.DigestType = uint8(i) s, e2 := endingToString(c, "bad "+typ+" Digest") @@ -1325,7 +1469,7 @@ func (rr *TA) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad TA KeyTag", l} + return &ParseError{err: "bad TA KeyTag", lex: l} } rr.KeyTag = uint16(i) c.Next() // zBlank @@ -1334,7 +1478,7 @@ func (rr *TA) parse(c *zlexer, o string) *ParseError { tokenUpper := strings.ToUpper(l.token) i, ok := StringToAlgorithm[tokenUpper] if !ok || l.err { - return &ParseError{"", "bad TA Algorithm", l} + return &ParseError{err: "bad TA Algorithm", lex: l} } rr.Algorithm = i } else { @@ -1344,7 +1488,7 @@ func (rr *TA) parse(c *zlexer, o string) *ParseError { l, 
_ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad TA DigestType", l} + return &ParseError{err: "bad TA DigestType", lex: l} } rr.DigestType = uint8(i) s, e2 := endingToString(c, "bad TA Digest") @@ -1359,21 +1503,21 @@ func (rr *TLSA) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return &ParseError{"", "bad TLSA Usage", l} + return &ParseError{err: "bad TLSA Usage", lex: l} } rr.Usage = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad TLSA Selector", l} + return &ParseError{err: "bad TLSA Selector", lex: l} } rr.Selector = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e2 := strconv.ParseUint(l.token, 10, 8) if e2 != nil || l.err { - return &ParseError{"", "bad TLSA MatchingType", l} + return &ParseError{err: "bad TLSA MatchingType", lex: l} } rr.MatchingType = uint8(i) // So this needs be e2 (i.e. different than e), because...??t @@ -1389,21 +1533,21 @@ func (rr *SMIMEA) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return &ParseError{"", "bad SMIMEA Usage", l} + return &ParseError{err: "bad SMIMEA Usage", lex: l} } rr.Usage = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad SMIMEA Selector", l} + return &ParseError{err: "bad SMIMEA Selector", lex: l} } rr.Selector = uint8(i) c.Next() // zBlank l, _ = c.Next() i, e2 := strconv.ParseUint(l.token, 10, 8) if e2 != nil || l.err { - return &ParseError{"", "bad SMIMEA MatchingType", l} + return &ParseError{err: "bad SMIMEA MatchingType", lex: l} } rr.MatchingType = uint8(i) // So this needs be e2 (i.e. 
different than e), because...??t @@ -1418,14 +1562,14 @@ func (rr *SMIMEA) parse(c *zlexer, o string) *ParseError { func (rr *RFC3597) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() if l.token != "\\#" { - return &ParseError{"", "bad RFC3597 Rdata", l} + return &ParseError{err: "bad RFC3597 Rdata", lex: l} } c.Next() // zBlank l, _ = c.Next() rdlength, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad RFC3597 Rdata ", l} + return &ParseError{err: "bad RFC3597 Rdata ", lex: l} } s, e1 := endingToString(c, "bad RFC3597 Rdata") @@ -1433,7 +1577,7 @@ func (rr *RFC3597) parse(c *zlexer, o string) *ParseError { return e1 } if int(rdlength)*2 != len(s) { - return &ParseError{"", "bad RFC3597 Rdata", l} + return &ParseError{err: "bad RFC3597 Rdata", lex: l} } rr.Rdata = s return nil @@ -1481,14 +1625,14 @@ func (rr *URI) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad URI Priority", l} + return &ParseError{err: "bad URI Priority", lex: l} } rr.Priority = uint16(i) c.Next() // zBlank l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 16) if e1 != nil || l.err { - return &ParseError{"", "bad URI Weight", l} + return &ParseError{err: "bad URI Weight", lex: l} } rr.Weight = uint16(i) @@ -1498,7 +1642,7 @@ func (rr *URI) parse(c *zlexer, o string) *ParseError { return e2 } if len(s) != 1 { - return &ParseError{"", "bad URI Target", l} + return &ParseError{err: "bad URI Target", lex: l} } rr.Target = s[0] return nil @@ -1518,7 +1662,7 @@ func (rr *NID) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad NID Preference", l} + return &ParseError{err: "bad NID Preference", lex: l} } rr.Preference = uint16(i) c.Next() // zBlank @@ -1535,14 +1679,14 @@ func (rr *L32) parse(c *zlexer, o string) *ParseError { l, _ := 
c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad L32 Preference", l} + return &ParseError{err: "bad L32 Preference", lex: l} } rr.Preference = uint16(i) c.Next() // zBlank l, _ = c.Next() // zString rr.Locator32 = net.ParseIP(l.token) if rr.Locator32 == nil || l.err { - return &ParseError{"", "bad L32 Locator", l} + return &ParseError{err: "bad L32 Locator", lex: l} } return slurpRemainder(c) } @@ -1551,7 +1695,7 @@ func (rr *LP) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad LP Preference", l} + return &ParseError{err: "bad LP Preference", lex: l} } rr.Preference = uint16(i) @@ -1560,7 +1704,7 @@ func (rr *LP) parse(c *zlexer, o string) *ParseError { rr.Fqdn = l.token name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{"", "bad LP Fqdn", l} + return &ParseError{err: "bad LP Fqdn", lex: l} } rr.Fqdn = name return slurpRemainder(c) @@ -1570,7 +1714,7 @@ func (rr *L64) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad L64 Preference", l} + return &ParseError{err: "bad L64 Preference", lex: l} } rr.Preference = uint16(i) c.Next() // zBlank @@ -1587,7 +1731,7 @@ func (rr *UID) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 32) if e != nil || l.err { - return &ParseError{"", "bad UID Uid", l} + return &ParseError{err: "bad UID Uid", lex: l} } rr.Uid = uint32(i) return slurpRemainder(c) @@ -1597,7 +1741,7 @@ func (rr *GID) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 32) if e != nil || l.err { - return &ParseError{"", "bad GID Gid", l} + return &ParseError{err: "bad GID Gid", lex: l} } rr.Gid = uint32(i) return slurpRemainder(c) @@ -1619,7 +1763,7 @@ func (rr *PX) parse(c 
*zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{"", "bad PX Preference", l} + return &ParseError{err: "bad PX Preference", lex: l} } rr.Preference = uint16(i) @@ -1628,7 +1772,7 @@ func (rr *PX) parse(c *zlexer, o string) *ParseError { rr.Map822 = l.token map822, map822Ok := toAbsoluteName(l.token, o) if l.err || !map822Ok { - return &ParseError{"", "bad PX Map822", l} + return &ParseError{err: "bad PX Map822", lex: l} } rr.Map822 = map822 @@ -1637,7 +1781,7 @@ func (rr *PX) parse(c *zlexer, o string) *ParseError { rr.Mapx400 = l.token mapx400, mapx400Ok := toAbsoluteName(l.token, o) if l.err || !mapx400Ok { - return &ParseError{"", "bad PX Mapx400", l} + return &ParseError{err: "bad PX Mapx400", lex: l} } rr.Mapx400 = mapx400 return slurpRemainder(c) @@ -1647,14 +1791,14 @@ func (rr *CAA) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return &ParseError{"", "bad CAA Flag", l} + return &ParseError{err: "bad CAA Flag", lex: l} } rr.Flag = uint8(i) c.Next() // zBlank l, _ = c.Next() // zString if l.value != zString { - return &ParseError{"", "bad CAA Tag", l} + return &ParseError{err: "bad CAA Tag", lex: l} } rr.Tag = l.token @@ -1664,7 +1808,7 @@ func (rr *CAA) parse(c *zlexer, o string) *ParseError { return e1 } if len(s) != 1 { - return &ParseError{"", "bad CAA Value", l} + return &ParseError{err: "bad CAA Value", lex: l} } rr.Value = s[0] return nil @@ -1675,7 +1819,7 @@ func (rr *TKEY) parse(c *zlexer, o string) *ParseError { // Algorithm if l.value != zString { - return &ParseError{"", "bad TKEY algorithm", l} + return &ParseError{err: "bad TKEY algorithm", lex: l} } rr.Algorithm = l.token c.Next() // zBlank @@ -1684,13 +1828,13 @@ func (rr *TKEY) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() i, e := strconv.ParseUint(l.token, 10, 8) if e != nil || l.err { - return &ParseError{"", "bad 
TKEY key length", l} + return &ParseError{err: "bad TKEY key length", lex: l} } rr.KeySize = uint16(i) c.Next() // zBlank l, _ = c.Next() if l.value != zString { - return &ParseError{"", "bad TKEY key", l} + return &ParseError{err: "bad TKEY key", lex: l} } rr.Key = l.token c.Next() // zBlank @@ -1699,13 +1843,13 @@ func (rr *TKEY) parse(c *zlexer, o string) *ParseError { l, _ = c.Next() i, e1 := strconv.ParseUint(l.token, 10, 8) if e1 != nil || l.err { - return &ParseError{"", "bad TKEY otherdata length", l} + return &ParseError{err: "bad TKEY otherdata length", lex: l} } rr.OtherLen = uint16(i) c.Next() // zBlank l, _ = c.Next() if l.value != zString { - return &ParseError{"", "bad TKEY otherday", l} + return &ParseError{err: "bad TKEY otherday", lex: l} } rr.OtherData = l.token return nil @@ -1723,14 +1867,14 @@ func (rr *APL) parse(c *zlexer, o string) *ParseError { continue } if l.value != zString { - return &ParseError{"", "unexpected APL field", l} + return &ParseError{err: "unexpected APL field", lex: l} } // Expected format: [!]afi:address/prefix colon := strings.IndexByte(l.token, ':') if colon == -1 { - return &ParseError{"", "missing colon in APL field", l} + return &ParseError{err: "missing colon in APL field", lex: l} } family, cidr := l.token[:colon], l.token[colon+1:] @@ -1743,7 +1887,7 @@ func (rr *APL) parse(c *zlexer, o string) *ParseError { afi, e := strconv.ParseUint(family, 10, 16) if e != nil { - return &ParseError{"", "failed to parse APL family: " + e.Error(), l} + return &ParseError{wrappedErr: fmt.Errorf("failed to parse APL family: %w", e), lex: l} } var addrLen int switch afi { @@ -1752,19 +1896,19 @@ func (rr *APL) parse(c *zlexer, o string) *ParseError { case 2: addrLen = net.IPv6len default: - return &ParseError{"", "unrecognized APL family", l} + return &ParseError{err: "unrecognized APL family", lex: l} } ip, subnet, e1 := net.ParseCIDR(cidr) if e1 != nil { - return &ParseError{"", "failed to parse APL address: " + e1.Error(), l} + 
return &ParseError{wrappedErr: fmt.Errorf("failed to parse APL address: %w", e1), lex: l} } if !ip.Equal(subnet.IP) { - return &ParseError{"", "extra bits in APL address", l} + return &ParseError{err: "extra bits in APL address", lex: l} } if len(subnet.IP) != addrLen { - return &ParseError{"", "address mismatch with the APL family", l} + return &ParseError{err: "address mismatch with the APL family", lex: l} } prefixes = append(prefixes, APLPrefix{ diff --git a/vendor/github.com/miekg/dns/server.go b/vendor/github.com/miekg/dns/server.go index 4e5a9aa8..0207d6da 100644 --- a/vendor/github.com/miekg/dns/server.go +++ b/vendor/github.com/miekg/dns/server.go @@ -18,7 +18,7 @@ import ( const maxTCPQueries = 128 // aLongTimeAgo is a non-zero time, far in the past, used for -// immediate cancelation of network operations. +// immediate cancellation of network operations. var aLongTimeAgo = time.Unix(1, 0) // Handler is implemented by any value that implements ServeDNS. @@ -224,8 +224,12 @@ type Server struct { // Maximum number of TCP queries before we close the socket. Default is maxTCPQueries (unlimited if -1). MaxTCPQueries int // Whether to set the SO_REUSEPORT socket option, allowing multiple listeners to be bound to a single address. - // It is only supported on go1.11+ and when using ListenAndServe. + // It is only supported on certain GOOSes and when using ListenAndServe. ReusePort bool + // Whether to set the SO_REUSEADDR socket option, allowing multiple listeners to be bound to a single address. + // Crucially this allows binding when an existing server is listening on `0.0.0.0` or `::`. + // It is only supported on certain GOOSes and when using ListenAndServe. + ReuseAddr bool // AcceptMsgFunc will check the incoming message and will reject it early in the process. // By default DefaultMsgAcceptFunc will be used. 
MsgAcceptFunc MsgAcceptFunc @@ -304,7 +308,7 @@ func (srv *Server) ListenAndServe() error { switch srv.Net { case "tcp", "tcp4", "tcp6": - l, err := listenTCP(srv.Net, addr, srv.ReusePort) + l, err := listenTCP(srv.Net, addr, srv.ReusePort, srv.ReuseAddr) if err != nil { return err } @@ -317,7 +321,7 @@ func (srv *Server) ListenAndServe() error { return errors.New("dns: neither Certificates nor GetCertificate set in Config") } network := strings.TrimSuffix(srv.Net, "-tls") - l, err := listenTCP(network, addr, srv.ReusePort) + l, err := listenTCP(network, addr, srv.ReusePort, srv.ReuseAddr) if err != nil { return err } @@ -327,7 +331,7 @@ func (srv *Server) ListenAndServe() error { unlock() return srv.serveTCP(l) case "udp", "udp4", "udp6": - l, err := listenUDP(srv.Net, addr, srv.ReusePort) + l, err := listenUDP(srv.Net, addr, srv.ReusePort, srv.ReuseAddr) if err != nil { return err } diff --git a/vendor/github.com/miekg/dns/singleinflight.go b/vendor/github.com/miekg/dns/singleinflight.go deleted file mode 100644 index febcc300..00000000 --- a/vendor/github.com/miekg/dns/singleinflight.go +++ /dev/null @@ -1,61 +0,0 @@ -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Adapted for dns package usage by Miek Gieben. - -package dns - -import "sync" -import "time" - -// call is an in-flight or completed singleflight.Do call -type call struct { - wg sync.WaitGroup - val *Msg - rtt time.Duration - err error - dups int -} - -// singleflight represents a class of work and forms a namespace in -// which units of work can be executed with duplicate suppression. 
-type singleflight struct { - sync.Mutex // protects m - m map[string]*call // lazily initialized - - dontDeleteForTesting bool // this is only to be used by TestConcurrentExchanges -} - -// Do executes and returns the results of the given function, making -// sure that only one execution is in-flight for a given key at a -// time. If a duplicate comes in, the duplicate caller waits for the -// original to complete and receives the same results. -// The return value shared indicates whether v was given to multiple callers. -func (g *singleflight) Do(key string, fn func() (*Msg, time.Duration, error)) (v *Msg, rtt time.Duration, err error, shared bool) { - g.Lock() - if g.m == nil { - g.m = make(map[string]*call) - } - if c, ok := g.m[key]; ok { - c.dups++ - g.Unlock() - c.wg.Wait() - return c.val, c.rtt, c.err, true - } - c := new(call) - c.wg.Add(1) - g.m[key] = c - g.Unlock() - - c.val, c.rtt, c.err = fn() - c.wg.Done() - - if !g.dontDeleteForTesting { - g.Lock() - delete(g.m, key) - g.Unlock() - } - - return c.val, c.rtt, c.err, c.dups > 0 -} diff --git a/vendor/github.com/miekg/dns/svcb.go b/vendor/github.com/miekg/dns/svcb.go index ea58710d..c1a740b6 100644 --- a/vendor/github.com/miekg/dns/svcb.go +++ b/vendor/github.com/miekg/dns/svcb.go @@ -85,7 +85,7 @@ func (rr *SVCB) parse(c *zlexer, o string) *ParseError { l, _ := c.Next() i, e := strconv.ParseUint(l.token, 10, 16) if e != nil || l.err { - return &ParseError{l.token, "bad SVCB priority", l} + return &ParseError{file: l.token, err: "bad SVCB priority", lex: l} } rr.Priority = uint16(i) @@ -95,7 +95,7 @@ func (rr *SVCB) parse(c *zlexer, o string) *ParseError { name, nameOk := toAbsoluteName(l.token, o) if l.err || !nameOk { - return &ParseError{l.token, "bad SVCB Target", l} + return &ParseError{file: l.token, err: "bad SVCB Target", lex: l} } rr.Target = name @@ -111,7 +111,7 @@ func (rr *SVCB) parse(c *zlexer, o string) *ParseError { if !canHaveNextKey { // The key we can now read was probably meant to 
be // a part of the last value. - return &ParseError{l.token, "bad SVCB value quotation", l} + return &ParseError{file: l.token, err: "bad SVCB value quotation", lex: l} } // In key=value pairs, value does not have to be quoted unless value @@ -124,7 +124,7 @@ func (rr *SVCB) parse(c *zlexer, o string) *ParseError { // Key with no value and no equality sign key = l.token } else if idx == 0 { - return &ParseError{l.token, "bad SVCB key", l} + return &ParseError{file: l.token, err: "bad SVCB key", lex: l} } else { key, value = l.token[:idx], l.token[idx+1:] @@ -144,30 +144,30 @@ func (rr *SVCB) parse(c *zlexer, o string) *ParseError { value = l.token l, _ = c.Next() if l.value != zQuote { - return &ParseError{l.token, "SVCB unterminated value", l} + return &ParseError{file: l.token, err: "SVCB unterminated value", lex: l} } case zQuote: // There's nothing in double quotes. default: - return &ParseError{l.token, "bad SVCB value", l} + return &ParseError{file: l.token, err: "bad SVCB value", lex: l} } } } } kv := makeSVCBKeyValue(svcbStringToKey(key)) if kv == nil { - return &ParseError{l.token, "bad SVCB key", l} + return &ParseError{file: l.token, err: "bad SVCB key", lex: l} } if err := kv.parse(value); err != nil { - return &ParseError{l.token, err.Error(), l} + return &ParseError{file: l.token, wrappedErr: err, lex: l} } xs = append(xs, kv) case zQuote: - return &ParseError{l.token, "SVCB key can't contain double quotes", l} + return &ParseError{file: l.token, err: "SVCB key can't contain double quotes", lex: l} case zBlank: canHaveNextKey = true default: - return &ParseError{l.token, "bad SVCB values", l} + return &ParseError{file: l.token, err: "bad SVCB values", lex: l} } l, _ = c.Next() } @@ -289,7 +289,7 @@ func (s *SVCBMandatory) String() string { } func (s *SVCBMandatory) pack() ([]byte, error) { - codes := append([]SVCBKey(nil), s.Code...) 
+ codes := cloneSlice(s.Code) sort.Slice(codes, func(i, j int) bool { return codes[i] < codes[j] }) @@ -314,10 +314,11 @@ func (s *SVCBMandatory) unpack(b []byte) error { } func (s *SVCBMandatory) parse(b string) error { - str := strings.Split(b, ",") - codes := make([]SVCBKey, 0, len(str)) - for _, e := range str { - codes = append(codes, svcbStringToKey(e)) + codes := make([]SVCBKey, 0, strings.Count(b, ",")+1) + for len(b) > 0 { + var key string + key, b, _ = strings.Cut(b, ",") + codes = append(codes, svcbStringToKey(key)) } s.Code = codes return nil @@ -328,9 +329,7 @@ func (s *SVCBMandatory) len() int { } func (s *SVCBMandatory) copy() SVCBKeyValue { - return &SVCBMandatory{ - append([]SVCBKey(nil), s.Code...), - } + return &SVCBMandatory{cloneSlice(s.Code)} } // SVCBAlpn pair is used to list supported connection protocols. @@ -353,7 +352,7 @@ func (*SVCBAlpn) Key() SVCBKey { return SVCB_ALPN } func (s *SVCBAlpn) String() string { // An ALPN value is a comma-separated list of values, each of which can be // an arbitrary binary value. In order to allow parsing, the comma and - // backslash characters are themselves excaped. + // backslash characters are themselves escaped. // // However, this escaping is done in addition to the normal escaping which // happens in zone files, meaning that these values must be @@ -481,9 +480,7 @@ func (s *SVCBAlpn) len() int { } func (s *SVCBAlpn) copy() SVCBKeyValue { - return &SVCBAlpn{ - append([]string(nil), s.Alpn...), - } + return &SVCBAlpn{cloneSlice(s.Alpn)} } // SVCBNoDefaultAlpn pair signifies no support for default connection protocols. @@ -563,15 +560,15 @@ func (s *SVCBPort) parse(b string) error { // to the hinted IP address may be terminated and a new connection may be opened. 
// Basic use pattern for creating an ipv4hint option: // -// h := new(dns.HTTPS) -// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET} -// e := new(dns.SVCBIPv4Hint) -// e.Hint = []net.IP{net.IPv4(1,1,1,1).To4()} +// h := new(dns.HTTPS) +// h.Hdr = dns.RR_Header{Name: ".", Rrtype: dns.TypeHTTPS, Class: dns.ClassINET} +// e := new(dns.SVCBIPv4Hint) +// e.Hint = []net.IP{net.IPv4(1,1,1,1).To4()} // -// Or +// Or // -// e.Hint = []net.IP{net.ParseIP("1.1.1.1").To4()} -// h.Value = append(h.Value, e) +// e.Hint = []net.IP{net.ParseIP("1.1.1.1").To4()} +// h.Value = append(h.Value, e) type SVCBIPv4Hint struct { Hint []net.IP } @@ -595,6 +592,7 @@ func (s *SVCBIPv4Hint) unpack(b []byte) error { if len(b) == 0 || len(b)%4 != 0 { return errors.New("dns: svcbipv4hint: ipv4 address byte array length is not a multiple of 4") } + b = cloneSlice(b) x := make([]net.IP, 0, len(b)/4) for i := 0; i < len(b); i += 4 { x = append(x, net.IP(b[i:i+4])) @@ -616,31 +614,33 @@ func (s *SVCBIPv4Hint) String() string { } func (s *SVCBIPv4Hint) parse(b string) error { + if b == "" { + return errors.New("dns: svcbipv4hint: empty hint") + } if strings.Contains(b, ":") { return errors.New("dns: svcbipv4hint: expected ipv4, got ipv6") } - str := strings.Split(b, ",") - dst := make([]net.IP, len(str)) - for i, e := range str { + + hint := make([]net.IP, 0, strings.Count(b, ",")+1) + for len(b) > 0 { + var e string + e, b, _ = strings.Cut(b, ",") ip := net.ParseIP(e).To4() if ip == nil { return errors.New("dns: svcbipv4hint: bad ip") } - dst[i] = ip + hint = append(hint, ip) } - s.Hint = dst + s.Hint = hint return nil } func (s *SVCBIPv4Hint) copy() SVCBKeyValue { hint := make([]net.IP, len(s.Hint)) for i, ip := range s.Hint { - hint[i] = copyIP(ip) - } - - return &SVCBIPv4Hint{ - Hint: hint, + hint[i] = cloneSlice(ip) } + return &SVCBIPv4Hint{Hint: hint} } // SVCBECHConfig pair contains the ECHConfig structure defined in draft-ietf-tls-esni [RFC xxxx]. 
@@ -660,19 +660,18 @@ func (s *SVCBECHConfig) String() string { return toBase64(s.ECH) } func (s *SVCBECHConfig) len() int { return len(s.ECH) } func (s *SVCBECHConfig) pack() ([]byte, error) { - return append([]byte(nil), s.ECH...), nil + return cloneSlice(s.ECH), nil } func (s *SVCBECHConfig) copy() SVCBKeyValue { - return &SVCBECHConfig{ - append([]byte(nil), s.ECH...), - } + return &SVCBECHConfig{cloneSlice(s.ECH)} } func (s *SVCBECHConfig) unpack(b []byte) error { - s.ECH = append([]byte(nil), b...) + s.ECH = cloneSlice(b) return nil } + func (s *SVCBECHConfig) parse(b string) error { x, err := fromBase64([]byte(b)) if err != nil { @@ -715,6 +714,7 @@ func (s *SVCBIPv6Hint) unpack(b []byte) error { if len(b) == 0 || len(b)%16 != 0 { return errors.New("dns: svcbipv6hint: ipv6 address byte array length not a multiple of 16") } + b = cloneSlice(b) x := make([]net.IP, 0, len(b)/16) for i := 0; i < len(b); i += 16 { ip := net.IP(b[i : i+16]) @@ -739,9 +739,14 @@ func (s *SVCBIPv6Hint) String() string { } func (s *SVCBIPv6Hint) parse(b string) error { - str := strings.Split(b, ",") - dst := make([]net.IP, len(str)) - for i, e := range str { + if b == "" { + return errors.New("dns: svcbipv6hint: empty hint") + } + + hint := make([]net.IP, 0, strings.Count(b, ",")+1) + for len(b) > 0 { + var e string + e, b, _ = strings.Cut(b, ",") ip := net.ParseIP(e) if ip == nil { return errors.New("dns: svcbipv6hint: bad ip") @@ -749,21 +754,18 @@ func (s *SVCBIPv6Hint) parse(b string) error { if ip.To4() != nil { return errors.New("dns: svcbipv6hint: expected ipv6, got ipv4-mapped-ipv6") } - dst[i] = ip + hint = append(hint, ip) } - s.Hint = dst + s.Hint = hint return nil } func (s *SVCBIPv6Hint) copy() SVCBKeyValue { hint := make([]net.IP, len(s.Hint)) for i, ip := range s.Hint { - hint[i] = copyIP(ip) - } - - return &SVCBIPv6Hint{ - Hint: hint, + hint[i] = cloneSlice(ip) } + return &SVCBIPv6Hint{Hint: hint} } // SVCBDoHPath pair is used to indicate the URI template that the @@ 
-831,11 +833,11 @@ type SVCBLocal struct { func (s *SVCBLocal) Key() SVCBKey { return s.KeyCode } func (s *SVCBLocal) String() string { return svcbParamToStr(s.Data) } -func (s *SVCBLocal) pack() ([]byte, error) { return append([]byte(nil), s.Data...), nil } +func (s *SVCBLocal) pack() ([]byte, error) { return cloneSlice(s.Data), nil } func (s *SVCBLocal) len() int { return len(s.Data) } func (s *SVCBLocal) unpack(b []byte) error { - s.Data = append([]byte(nil), b...) + s.Data = cloneSlice(b) return nil } @@ -849,9 +851,7 @@ func (s *SVCBLocal) parse(b string) error { } func (s *SVCBLocal) copy() SVCBKeyValue { - return &SVCBLocal{s.KeyCode, - append([]byte(nil), s.Data...), - } + return &SVCBLocal{s.KeyCode, cloneSlice(s.Data)} } func (rr *SVCB) String() string { @@ -867,8 +867,8 @@ func (rr *SVCB) String() string { // areSVCBPairArraysEqual checks if SVCBKeyValue arrays are equal after sorting their // copies. arrA and arrB have equal lengths, otherwise zduplicate.go wouldn't call this function. func areSVCBPairArraysEqual(a []SVCBKeyValue, b []SVCBKeyValue) bool { - a = append([]SVCBKeyValue(nil), a...) - b = append([]SVCBKeyValue(nil), b...) 
+ a = cloneSlice(a) + b = cloneSlice(b) sort.Slice(a, func(i, j int) bool { return a[i].Key() < a[j].Key() }) sort.Slice(b, func(i, j int) bool { return b[i].Key() < b[j].Key() }) for i, e := range a { diff --git a/vendor/github.com/miekg/dns/tools.go b/vendor/github.com/miekg/dns/tools.go index d1118253..ccf8f6bf 100644 --- a/vendor/github.com/miekg/dns/tools.go +++ b/vendor/github.com/miekg/dns/tools.go @@ -1,3 +1,4 @@ +//go:build tools // +build tools // We include our tool dependencies for `go generate` here to ensure they're diff --git a/vendor/github.com/miekg/dns/types.go b/vendor/github.com/miekg/dns/types.go index d9becb67..8e3129cb 100644 --- a/vendor/github.com/miekg/dns/types.go +++ b/vendor/github.com/miekg/dns/types.go @@ -65,6 +65,7 @@ const ( TypeAPL uint16 = 42 TypeDS uint16 = 43 TypeSSHFP uint16 = 44 + TypeIPSECKEY uint16 = 45 TypeRRSIG uint16 = 46 TypeNSEC uint16 = 47 TypeDNSKEY uint16 = 48 @@ -98,6 +99,7 @@ const ( TypeURI uint16 = 256 TypeCAA uint16 = 257 TypeAVC uint16 = 258 + TypeAMTRELAY uint16 = 260 TypeTKEY uint16 = 249 TypeTSIG uint16 = 250 @@ -133,8 +135,8 @@ const ( RcodeNXRrset = 8 // NXRRSet - RR Set that should exist does not [DNS Update] RcodeNotAuth = 9 // NotAuth - Server Not Authoritative for zone [DNS Update] RcodeNotZone = 10 // NotZone - Name not contained in zone [DNS Update/TSIG] - RcodeBadSig = 16 // BADSIG - TSIG Signature Failure [TSIG] - RcodeBadVers = 16 // BADVERS - Bad OPT Version [EDNS0] + RcodeBadSig = 16 // BADSIG - TSIG Signature Failure [TSIG] https://www.rfc-editor.org/rfc/rfc6895.html#section-2.3 + RcodeBadVers = 16 // BADVERS - Bad OPT Version [EDNS0] https://www.rfc-editor.org/rfc/rfc6895.html#section-2.3 RcodeBadKey = 17 // BADKEY - Key not recognized [TSIG] RcodeBadTime = 18 // BADTIME - Signature out of time window [TSIG] RcodeBadMode = 19 // BADMODE - Bad TKEY Mode [TKEY] @@ -159,6 +161,22 @@ const ( ZoneMDHashAlgSHA512 = 2 ) +// Used in IPSEC https://datatracker.ietf.org/doc/html/rfc4025#section-2.3 
+const ( + IPSECGatewayNone uint8 = iota + IPSECGatewayIPv4 + IPSECGatewayIPv6 + IPSECGatewayHost +) + +// Used in AMTRELAY https://datatracker.ietf.org/doc/html/rfc8777#section-4.2.3 +const ( + AMTRELAYNone = IPSECGatewayNone + AMTRELAYIPv4 = IPSECGatewayIPv4 + AMTRELAYIPv6 = IPSECGatewayIPv6 + AMTRELAYHost = IPSECGatewayHost +) + // Header is the wire format for the DNS packet header. type Header struct { Id uint16 @@ -180,7 +198,7 @@ const ( _CD = 1 << 4 // checking disabled ) -// Various constants used in the LOC RR. See RFC 1887. +// Various constants used in the LOC RR. See RFC 1876. const ( LOC_EQUATOR = 1 << 31 // RFC 1876, Section 2. LOC_PRIMEMERIDIAN = 1 << 31 // RFC 1876, Section 2. @@ -218,6 +236,9 @@ var CertTypeToString = map[uint16]string{ CertOID: "OID", } +// Prefix for IPv4 encoded as IPv6 address +const ipv4InIPv6Prefix = "::ffff:" + //go:generate go run types_generate.go // Question holds a DNS question. Usually there is just one. While the @@ -381,6 +402,17 @@ func (rr *X25) String() string { return rr.Hdr.String() + rr.PSDNAddress } +// ISDN RR. See RFC 1183, Section 3.2. +type ISDN struct { + Hdr RR_Header + Address string + SubAddress string +} + +func (rr *ISDN) String() string { + return rr.Hdr.String() + sprintTxt([]string{rr.Address, rr.SubAddress}) +} + // RT RR. See RFC 1183, Section 3.3. 
type RT struct { Hdr RR_Header @@ -613,8 +645,8 @@ func nextByte(s string, offset int) (byte, int) { return 0, 0 case 2, 3: // too short to be \ddd default: // maybe \ddd - if isDigit(s[offset+1]) && isDigit(s[offset+2]) && isDigit(s[offset+3]) { - return dddStringToByte(s[offset+1:]), 4 + if isDDD(s[offset+1:]) { + return dddToByte(s[offset+1:]), 4 } } // not \ddd, just an RFC 1035 "quoted" character @@ -733,6 +765,11 @@ func (rr *AAAA) String() string { if rr.AAAA == nil { return rr.Hdr.String() } + + if rr.AAAA.To4() != nil { + return rr.Hdr.String() + ipv4InIPv6Prefix + rr.AAAA.String() + } + return rr.Hdr.String() + rr.AAAA.String() } @@ -760,7 +797,7 @@ func (rr *GPOS) String() string { return rr.Hdr.String() + rr.Longitude + " " + rr.Latitude + " " + rr.Altitude } -// LOC RR. See RFC RFC 1876. +// LOC RR. See RFC 1876. type LOC struct { Hdr RR_Header Version uint8 @@ -774,7 +811,10 @@ type LOC struct { // cmToM takes a cm value expressed in RFC 1876 SIZE mantissa/exponent // format and returns a string in m (two decimals for the cm). -func cmToM(m, e uint8) string { +func cmToM(x uint8) string { + m := x & 0xf0 >> 4 + e := x & 0x0f + if e < 2 { if e == 1 { m *= 10 @@ -830,10 +870,9 @@ func (rr *LOC) String() string { s += fmt.Sprintf("%.0fm ", alt) } - s += cmToM(rr.Size&0xf0>>4, rr.Size&0x0f) + "m " - s += cmToM(rr.HorizPre&0xf0>>4, rr.HorizPre&0x0f) + "m " - s += cmToM(rr.VertPre&0xf0>>4, rr.VertPre&0x0f) + "m" - + s += cmToM(rr.Size) + "m " + s += cmToM(rr.HorizPre) + "m " + s += cmToM(rr.VertPre) + "m" return s } @@ -870,6 +909,11 @@ func (rr *RRSIG) String() string { return s } +// NXT RR. See RFC 2535. +type NXT struct { + NSEC +} + // NSEC RR. See RFC 4034 and RFC 3755. type NSEC struct { Hdr RR_Header @@ -954,7 +998,7 @@ func (rr *TALINK) String() string { sprintName(rr.PreviousName) + " " + sprintName(rr.NextName) } -// SSHFP RR. See RFC RFC 4255. +// SSHFP RR. See RFC 4255. 
type SSHFP struct { Hdr RR_Header Algorithm uint8 @@ -968,7 +1012,7 @@ func (rr *SSHFP) String() string { " " + strings.ToUpper(rr.FingerPrint) } -// KEY RR. See RFC RFC 2535. +// KEY RR. See RFC 2535. type KEY struct { DNSKEY } @@ -994,6 +1038,69 @@ func (rr *DNSKEY) String() string { " " + rr.PublicKey } +// IPSECKEY RR. See RFC 4025. +type IPSECKEY struct { + Hdr RR_Header + Precedence uint8 + GatewayType uint8 + Algorithm uint8 + GatewayAddr net.IP `dns:"-"` // packing/unpacking/parsing/etc handled together with GatewayHost + GatewayHost string `dns:"ipsechost"` + PublicKey string `dns:"base64"` +} + +func (rr *IPSECKEY) String() string { + var gateway string + switch rr.GatewayType { + case IPSECGatewayIPv4, IPSECGatewayIPv6: + gateway = rr.GatewayAddr.String() + case IPSECGatewayHost: + gateway = rr.GatewayHost + case IPSECGatewayNone: + fallthrough + default: + gateway = "." + } + + return rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) + + " " + strconv.Itoa(int(rr.GatewayType)) + + " " + strconv.Itoa(int(rr.Algorithm)) + + " " + gateway + + " " + rr.PublicKey +} + +// AMTRELAY RR. See RFC 8777. +type AMTRELAY struct { + Hdr RR_Header + Precedence uint8 + GatewayType uint8 // discovery is packed in here at bit 0x80 + GatewayAddr net.IP `dns:"-"` // packing/unpacking/parsing/etc handled together with GatewayHost + GatewayHost string `dns:"amtrelayhost"` +} + +func (rr *AMTRELAY) String() string { + var gateway string + switch rr.GatewayType & 0x7f { + case AMTRELAYIPv4, AMTRELAYIPv6: + gateway = rr.GatewayAddr.String() + case AMTRELAYHost: + gateway = rr.GatewayHost + case AMTRELAYNone: + fallthrough + default: + gateway = "." + } + boolS := "0" + if rr.GatewayType&0x80 == 0x80 { + boolS = "1" + } + + return rr.Hdr.String() + strconv.Itoa(int(rr.Precedence)) + + " " + boolS + + " " + strconv.Itoa(int(rr.GatewayType&0x7f)) + + " " + gateway +} + // RKEY RR. See https://www.iana.org/assignments/dns-parameters/RKEY/rkey-completed-template. 
type RKEY struct { Hdr RR_Header @@ -1215,7 +1322,7 @@ type NINFO struct { func (rr *NINFO) String() string { return rr.Hdr.String() + sprintTxt(rr.ZSData) } -// NID RR. See RFC RFC 6742. +// NID RR. See RFC 6742. type NID struct { Hdr RR_Header Preference uint16 @@ -1434,7 +1541,7 @@ func (a *APLPrefix) str() string { case net.IPv6len: // add prefix for IPv4-mapped IPv6 if v4 := a.Network.IP.To4(); v4 != nil { - sb.WriteString("::ffff:") + sb.WriteString(ipv4InIPv6Prefix) } sb.WriteString(a.Network.IP.String()) } @@ -1450,7 +1557,7 @@ func (a *APLPrefix) str() string { // equals reports whether two APL prefixes are identical. func (a *APLPrefix) equals(b *APLPrefix) bool { return a.Negation == b.Negation && - bytes.Equal(a.Network.IP, b.Network.IP) && + a.Network.IP.Equal(b.Network.IP) && bytes.Equal(a.Network.Mask, b.Network.Mask) } @@ -1518,21 +1625,19 @@ func euiToString(eui uint64, bits int) (hex string) { return } -// copyIP returns a copy of ip. -func copyIP(ip net.IP) net.IP { - p := make(net.IP, len(ip)) - copy(p, ip) - return p +// cloneSlice returns a shallow copy of s. +func cloneSlice[E any, S ~[]E](s S) S { + if s == nil { + return nil + } + return append(S(nil), s...) } // copyNet returns a copy of a subnet. 
func copyNet(n net.IPNet) net.IPNet { - m := make(net.IPMask, len(n.Mask)) - copy(m, n.Mask) - return net.IPNet{ - IP: copyIP(n.IP), - Mask: m, + IP: cloneSlice(n.IP), + Mask: cloneSlice(n.Mask), } } diff --git a/vendor/github.com/miekg/dns/udp.go b/vendor/github.com/miekg/dns/udp.go index a4826ee2..c018ad43 100644 --- a/vendor/github.com/miekg/dns/udp.go +++ b/vendor/github.com/miekg/dns/udp.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package dns diff --git a/vendor/github.com/miekg/dns/udp_windows.go b/vendor/github.com/miekg/dns/udp_windows.go index e7dd8ca3..a259b67e 100644 --- a/vendor/github.com/miekg/dns/udp_windows.go +++ b/vendor/github.com/miekg/dns/udp_windows.go @@ -1,5 +1,9 @@ +//go:build windows // +build windows +// TODO(tmthrgd): Remove this Windows-specific code if go.dev/issue/7175 and +// go.dev/issue/7174 are ever fixed. + package dns import "net" @@ -14,7 +18,6 @@ func (s *SessionUDP) RemoteAddr() net.Addr { return s.raddr } // ReadFromSessionUDP acts just like net.UDPConn.ReadFrom(), but returns a session object instead of a // net.UDPAddr. -// TODO(fastest963): Once go1.10 is released, use ReadMsgUDP. func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { n, raddr, err := conn.ReadFrom(b) if err != nil { @@ -24,12 +27,9 @@ func ReadFromSessionUDP(conn *net.UDPConn, b []byte) (int, *SessionUDP, error) { } // WriteToSessionUDP acts just like net.UDPConn.WriteTo(), but uses a *SessionUDP instead of a net.Addr. -// TODO(fastest963): Once go1.10 is released, use WriteMsgUDP. func WriteToSessionUDP(conn *net.UDPConn, b []byte, session *SessionUDP) (int, error) { return conn.WriteTo(b, session.raddr) } -// TODO(fastest963): Once go1.10 is released and we can use *MsgUDP methods -// use the standard method in udp.go for these. 
func setUDPSocketOptions(*net.UDPConn) error { return nil } func parseDstFromOOB([]byte, net.IP) net.IP { return nil } diff --git a/vendor/github.com/miekg/dns/version.go b/vendor/github.com/miekg/dns/version.go index b1a872bd..dc34e590 100644 --- a/vendor/github.com/miekg/dns/version.go +++ b/vendor/github.com/miekg/dns/version.go @@ -3,7 +3,7 @@ package dns import "fmt" // Version is current version of this library. -var Version = v{1, 1, 50} +var Version = v{1, 1, 58} // v holds the version of this library. type v struct { diff --git a/vendor/github.com/miekg/dns/xfr.go b/vendor/github.com/miekg/dns/xfr.go index 1917e91c..05b3c5ad 100644 --- a/vendor/github.com/miekg/dns/xfr.go +++ b/vendor/github.com/miekg/dns/xfr.go @@ -44,7 +44,6 @@ func (t *Transfer) tsigProvider() TsigProvider { // dnscon := &dns.Conn{Conn:con} // transfer = &dns.Transfer{Conn: dnscon} // channel, err := transfer.In(message, master) -// func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { switch q.Question[0].Qtype { case TypeAXFR, TypeIXFR: @@ -81,8 +80,13 @@ func (t *Transfer) In(q *Msg, a string) (env chan *Envelope, err error) { func (t *Transfer) inAxfr(q *Msg, c chan *Envelope) { first := true - defer t.Close() - defer close(c) + defer func() { + // First close the connection, then the channel. This allows functions blocked on + // the channel to assume that the connection is closed and no further operations are + // pending when they resume. + t.Close() + close(c) + }() timeout := dnsTimeout if t.ReadTimeout != 0 { timeout = t.ReadTimeout @@ -132,8 +136,13 @@ func (t *Transfer) inIxfr(q *Msg, c chan *Envelope) { axfr := true n := 0 qser := q.Ns[0].(*SOA).Serial - defer t.Close() - defer close(c) + defer func() { + // First close the connection, then the channel. This allows functions blocked on + // the channel to assume that the connection is closed and no further operations are + // pending when they resume. 
+ t.Close() + close(c) + }() timeout := dnsTimeout if t.ReadTimeout != 0 { timeout = t.ReadTimeout diff --git a/vendor/github.com/miekg/dns/zduplicate.go b/vendor/github.com/miekg/dns/zduplicate.go index 9eb1dac2..03029fb3 100644 --- a/vendor/github.com/miekg/dns/zduplicate.go +++ b/vendor/github.com/miekg/dns/zduplicate.go @@ -43,6 +43,32 @@ func (r1 *AFSDB) isDuplicate(_r2 RR) bool { return true } +func (r1 *AMTRELAY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*AMTRELAY) + if !ok { + return false + } + _ = r2 + if r1.Precedence != r2.Precedence { + return false + } + if r1.GatewayType != r2.GatewayType { + return false + } + switch r1.GatewayType { + case IPSECGatewayIPv4, IPSECGatewayIPv6: + if !r1.GatewayAddr.Equal(r2.GatewayAddr) { + return false + } + case IPSECGatewayHost: + if !isDuplicateName(r1.GatewayHost, r2.GatewayHost) { + return false + } + } + + return true +} + func (r1 *ANY) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*ANY) if !ok { @@ -423,6 +449,53 @@ func (r1 *HTTPS) isDuplicate(_r2 RR) bool { return true } +func (r1 *IPSECKEY) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*IPSECKEY) + if !ok { + return false + } + _ = r2 + if r1.Precedence != r2.Precedence { + return false + } + if r1.GatewayType != r2.GatewayType { + return false + } + if r1.Algorithm != r2.Algorithm { + return false + } + switch r1.GatewayType { + case IPSECGatewayIPv4, IPSECGatewayIPv6: + if !r1.GatewayAddr.Equal(r2.GatewayAddr) { + return false + } + case IPSECGatewayHost: + if !isDuplicateName(r1.GatewayHost, r2.GatewayHost) { + return false + } + } + + if r1.PublicKey != r2.PublicKey { + return false + } + return true +} + +func (r1 *ISDN) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*ISDN) + if !ok { + return false + } + _ = r2 + if r1.Address != r2.Address { + return false + } + if r1.SubAddress != r2.SubAddress { + return false + } + return true +} + func (r1 *KEY) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*KEY) if !ok { @@ -813,6 +886,26 @@ func (r1 *NULL) 
isDuplicate(_r2 RR) bool { return true } +func (r1 *NXT) isDuplicate(_r2 RR) bool { + r2, ok := _r2.(*NXT) + if !ok { + return false + } + _ = r2 + if !isDuplicateName(r1.NextDomain, r2.NextDomain) { + return false + } + if len(r1.TypeBitMap) != len(r2.TypeBitMap) { + return false + } + for i := 0; i < len(r1.TypeBitMap); i++ { + if r1.TypeBitMap[i] != r2.TypeBitMap[i] { + return false + } + } + return true +} + func (r1 *OPENPGPKEY) isDuplicate(_r2 RR) bool { r2, ok := _r2.(*OPENPGPKEY) if !ok { diff --git a/vendor/github.com/miekg/dns/zmsg.go b/vendor/github.com/miekg/dns/zmsg.go index fc0822f9..39b3bc81 100644 --- a/vendor/github.com/miekg/dns/zmsg.go +++ b/vendor/github.com/miekg/dns/zmsg.go @@ -32,6 +32,22 @@ func (rr *AFSDB) pack(msg []byte, off int, compression compressionMap, compress return off, nil } +func (rr *AMTRELAY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Precedence, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.GatewayType, msg, off) + if err != nil { + return off, err + } + off, err = packIPSECGateway(rr.GatewayAddr, rr.GatewayHost, msg, off, rr.GatewayType, compression, false) + if err != nil { + return off, err + } + return off, nil +} + func (rr *ANY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { return off, nil } @@ -332,6 +348,42 @@ func (rr *HTTPS) pack(msg []byte, off int, compression compressionMap, compress return off, nil } +func (rr *IPSECKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packUint8(rr.Precedence, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.GatewayType, msg, off) + if err != nil { + return off, err + } + off, err = packUint8(rr.Algorithm, msg, off) + if err != nil { + return off, err + } + off, err = packIPSECGateway(rr.GatewayAddr, rr.GatewayHost, msg, off, rr.GatewayType, 
compression, false) + if err != nil { + return off, err + } + off, err = packStringBase64(rr.PublicKey, msg, off) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *ISDN) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packString(rr.Address, msg, off) + if err != nil { + return off, err + } + off, err = packString(rr.SubAddress, msg, off) + if err != nil { + return off, err + } + return off, nil +} + func (rr *KEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packUint16(rr.Flags, msg, off) if err != nil { @@ -654,6 +706,18 @@ func (rr *NULL) pack(msg []byte, off int, compression compressionMap, compress b return off, nil } +func (rr *NXT) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { + off, err = packDomainName(rr.NextDomain, msg, off, compression, false) + if err != nil { + return off, err + } + off, err = packDataNsec(rr.TypeBitMap, msg, off) + if err != nil { + return off, err + } + return off, nil +} + func (rr *OPENPGPKEY) pack(msg []byte, off int, compression compressionMap, compress bool) (off1 int, err error) { off, err = packStringBase64(rr.PublicKey, msg, off) if err != nil { @@ -1180,6 +1244,34 @@ func (rr *AFSDB) unpack(msg []byte, off int) (off1 int, err error) { return off, nil } +func (rr *AMTRELAY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Precedence, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.GatewayType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + if off == len(msg) { + return off, nil + } + rr.GatewayAddr, rr.GatewayHost, off, err = unpackIPSECGateway(msg, off, rr.GatewayType) + if err != nil { + return off, err + } + return off, nil +} + func (rr *ANY) 
unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart @@ -1636,6 +1728,66 @@ func (rr *HTTPS) unpack(msg []byte, off int) (off1 int, err error) { return off, nil } +func (rr *IPSECKEY) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Precedence, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.GatewayType, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.Algorithm, off, err = unpackUint8(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + if off == len(msg) { + return off, nil + } + rr.GatewayAddr, rr.GatewayHost, off, err = unpackIPSECGateway(msg, off, rr.GatewayType) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.PublicKey, off, err = unpackStringBase64(msg, off, rdStart+int(rr.Hdr.Rdlength)) + if err != nil { + return off, err + } + return off, nil +} + +func (rr *ISDN) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.Address, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.SubAddress, off, err = unpackString(msg, off) + if err != nil { + return off, err + } + return off, nil +} + func (rr *KEY) unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart @@ -2114,6 +2266,24 @@ func (rr *NULL) unpack(msg []byte, off int) (off1 int, err error) { return off, nil } +func (rr *NXT) unpack(msg []byte, off int) (off1 int, err error) { + rdStart := off + _ = rdStart + + rr.NextDomain, off, err = UnpackDomainName(msg, off) + if err != nil { + return off, err + } + if off == len(msg) { + return off, nil + } + rr.TypeBitMap, off, err = unpackDataNsec(msg, off) + if err != nil { + return off, err + } + return off, nil +} + func (rr *OPENPGPKEY) 
unpack(msg []byte, off int) (off1 int, err error) { rdStart := off _ = rdStart diff --git a/vendor/github.com/miekg/dns/ztypes.go b/vendor/github.com/miekg/dns/ztypes.go index 5d060cfe..2c70fc44 100644 --- a/vendor/github.com/miekg/dns/ztypes.go +++ b/vendor/github.com/miekg/dns/ztypes.go @@ -12,6 +12,7 @@ var TypeToRR = map[uint16]func() RR{ TypeA: func() RR { return new(A) }, TypeAAAA: func() RR { return new(AAAA) }, TypeAFSDB: func() RR { return new(AFSDB) }, + TypeAMTRELAY: func() RR { return new(AMTRELAY) }, TypeANY: func() RR { return new(ANY) }, TypeAPL: func() RR { return new(APL) }, TypeAVC: func() RR { return new(AVC) }, @@ -34,6 +35,8 @@ var TypeToRR = map[uint16]func() RR{ TypeHINFO: func() RR { return new(HINFO) }, TypeHIP: func() RR { return new(HIP) }, TypeHTTPS: func() RR { return new(HTTPS) }, + TypeIPSECKEY: func() RR { return new(IPSECKEY) }, + TypeISDN: func() RR { return new(ISDN) }, TypeKEY: func() RR { return new(KEY) }, TypeKX: func() RR { return new(KX) }, TypeL32: func() RR { return new(L32) }, @@ -57,6 +60,7 @@ var TypeToRR = map[uint16]func() RR{ TypeNSEC3: func() RR { return new(NSEC3) }, TypeNSEC3PARAM: func() RR { return new(NSEC3PARAM) }, TypeNULL: func() RR { return new(NULL) }, + TypeNXT: func() RR { return new(NXT) }, TypeOPENPGPKEY: func() RR { return new(OPENPGPKEY) }, TypeOPT: func() RR { return new(OPT) }, TypePTR: func() RR { return new(PTR) }, @@ -90,6 +94,7 @@ var TypeToString = map[uint16]string{ TypeA: "A", TypeAAAA: "AAAA", TypeAFSDB: "AFSDB", + TypeAMTRELAY: "AMTRELAY", TypeANY: "ANY", TypeAPL: "APL", TypeATMA: "ATMA", @@ -114,6 +119,7 @@ var TypeToString = map[uint16]string{ TypeHINFO: "HINFO", TypeHIP: "HIP", TypeHTTPS: "HTTPS", + TypeIPSECKEY: "IPSECKEY", TypeISDN: "ISDN", TypeIXFR: "IXFR", TypeKEY: "KEY", @@ -176,6 +182,7 @@ var TypeToString = map[uint16]string{ func (rr *A) Header() *RR_Header { return &rr.Hdr } func (rr *AAAA) Header() *RR_Header { return &rr.Hdr } func (rr *AFSDB) Header() *RR_Header { return 
&rr.Hdr } +func (rr *AMTRELAY) Header() *RR_Header { return &rr.Hdr } func (rr *ANY) Header() *RR_Header { return &rr.Hdr } func (rr *APL) Header() *RR_Header { return &rr.Hdr } func (rr *AVC) Header() *RR_Header { return &rr.Hdr } @@ -198,6 +205,8 @@ func (rr *GPOS) Header() *RR_Header { return &rr.Hdr } func (rr *HINFO) Header() *RR_Header { return &rr.Hdr } func (rr *HIP) Header() *RR_Header { return &rr.Hdr } func (rr *HTTPS) Header() *RR_Header { return &rr.Hdr } +func (rr *IPSECKEY) Header() *RR_Header { return &rr.Hdr } +func (rr *ISDN) Header() *RR_Header { return &rr.Hdr } func (rr *KEY) Header() *RR_Header { return &rr.Hdr } func (rr *KX) Header() *RR_Header { return &rr.Hdr } func (rr *L32) Header() *RR_Header { return &rr.Hdr } @@ -221,6 +230,7 @@ func (rr *NSEC) Header() *RR_Header { return &rr.Hdr } func (rr *NSEC3) Header() *RR_Header { return &rr.Hdr } func (rr *NSEC3PARAM) Header() *RR_Header { return &rr.Hdr } func (rr *NULL) Header() *RR_Header { return &rr.Hdr } +func (rr *NXT) Header() *RR_Header { return &rr.Hdr } func (rr *OPENPGPKEY) Header() *RR_Header { return &rr.Hdr } func (rr *OPT) Header() *RR_Header { return &rr.Hdr } func (rr *PTR) Header() *RR_Header { return &rr.Hdr } @@ -257,6 +267,7 @@ func (rr *A) len(off int, compression map[string]struct{}) int { } return l } + func (rr *AAAA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) if len(rr.AAAA) != 0 { @@ -264,16 +275,34 @@ func (rr *AAAA) len(off int, compression map[string]struct{}) int { } return l } + func (rr *AFSDB) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Subtype l += domainNameLen(rr.Hostname, off+l, compression, false) return l } + +func (rr *AMTRELAY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // Precedence + l++ // GatewayType + switch rr.GatewayType { + case AMTRELAYIPv4: + l += net.IPv4len + case AMTRELAYIPv6: + l += net.IPv6len 
+ case AMTRELAYHost: + l += len(rr.GatewayHost) + 1 + } + return l +} + func (rr *ANY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) return l } + func (rr *APL) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) for _, x := range rr.Prefixes { @@ -281,6 +310,7 @@ func (rr *APL) len(off int, compression map[string]struct{}) int { } return l } + func (rr *AVC) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) for _, x := range rr.Txt { @@ -288,6 +318,7 @@ func (rr *AVC) len(off int, compression map[string]struct{}) int { } return l } + func (rr *CAA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Flag @@ -295,6 +326,7 @@ func (rr *CAA) len(off int, compression map[string]struct{}) int { l += len(rr.Value) return l } + func (rr *CERT) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Type @@ -303,21 +335,25 @@ func (rr *CERT) len(off int, compression map[string]struct{}) int { l += base64.StdEncoding.DecodedLen(len(rr.Certificate)) return l } + func (rr *CNAME) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Target, off+l, compression, true) return l } + func (rr *DHCID) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += base64.StdEncoding.DecodedLen(len(rr.Digest)) return l } + func (rr *DNAME) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Target, off+l, compression, false) return l } + func (rr *DNSKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Flags @@ -326,6 +362,7 @@ func (rr *DNSKEY) len(off int, compression map[string]struct{}) int { l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) return l } + func (rr *DS) len(off int, compression 
map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // KeyTag @@ -334,26 +371,31 @@ func (rr *DS) len(off int, compression map[string]struct{}) int { l += len(rr.Digest) / 2 return l } + func (rr *EID) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Endpoint) / 2 return l } + func (rr *EUI48) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 6 // Address return l } + func (rr *EUI64) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 8 // Address return l } + func (rr *GID) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 4 // Gid return l } + func (rr *GPOS) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Longitude) + 1 @@ -361,12 +403,14 @@ func (rr *GPOS) len(off int, compression map[string]struct{}) int { l += len(rr.Altitude) + 1 return l } + func (rr *HINFO) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Cpu) + 1 l += len(rr.Os) + 1 return l } + func (rr *HIP) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // HitLength @@ -379,12 +423,38 @@ func (rr *HIP) len(off int, compression map[string]struct{}) int { } return l } + +func (rr *IPSECKEY) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l++ // Precedence + l++ // GatewayType + l++ // Algorithm + switch rr.GatewayType { + case IPSECGatewayIPv4: + l += net.IPv4len + case IPSECGatewayIPv6: + l += net.IPv6len + case IPSECGatewayHost: + l += len(rr.GatewayHost) + 1 + } + l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) + return l +} + +func (rr *ISDN) len(off int, compression map[string]struct{}) int { + l := rr.Hdr.len(off, compression) + l += len(rr.Address) + 1 + l += len(rr.SubAddress) + 1 + return l +} + func (rr *KX) len(off int, 
compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += domainNameLen(rr.Exchanger, off+l, compression, false) return l } + func (rr *L32) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference @@ -393,12 +463,14 @@ func (rr *L32) len(off int, compression map[string]struct{}) int { } return l } + func (rr *L64) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += 8 // Locator64 return l } + func (rr *LOC) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Version @@ -410,49 +482,58 @@ func (rr *LOC) len(off int, compression map[string]struct{}) int { l += 4 // Altitude return l } + func (rr *LP) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += domainNameLen(rr.Fqdn, off+l, compression, false) return l } + func (rr *MB) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Mb, off+l, compression, true) return l } + func (rr *MD) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Md, off+l, compression, true) return l } + func (rr *MF) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Mf, off+l, compression, true) return l } + func (rr *MG) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Mg, off+l, compression, true) return l } + func (rr *MINFO) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Rmail, off+l, compression, true) l += domainNameLen(rr.Email, off+l, compression, true) return l } + func (rr *MR) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Mr, off+l, 
compression, true) return l } + func (rr *MX) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += domainNameLen(rr.Mx, off+l, compression, true) return l } + func (rr *NAPTR) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Order @@ -463,17 +544,20 @@ func (rr *NAPTR) len(off int, compression map[string]struct{}) int { l += domainNameLen(rr.Replacement, off+l, compression, false) return l } + func (rr *NID) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += 8 // NodeID return l } + func (rr *NIMLOC) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Locator) / 2 return l } + func (rr *NINFO) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) for _, x := range rr.ZSData { @@ -481,16 +565,19 @@ func (rr *NINFO) len(off int, compression map[string]struct{}) int { } return l } + func (rr *NS) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Ns, off+l, compression, true) return l } + func (rr *NSAPPTR) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Ptr, off+l, compression, false) return l } + func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Hash @@ -500,21 +587,25 @@ func (rr *NSEC3PARAM) len(off int, compression map[string]struct{}) int { l += len(rr.Salt) / 2 return l } + func (rr *NULL) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Data) return l } + func (rr *OPENPGPKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) return l } + func (rr *PTR) len(off int, compression map[string]struct{}) int { 
l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Ptr, off+l, compression, true) return l } + func (rr *PX) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference @@ -522,11 +613,13 @@ func (rr *PX) len(off int, compression map[string]struct{}) int { l += domainNameLen(rr.Mapx400, off+l, compression, false) return l } + func (rr *RFC3597) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Rdata) / 2 return l } + func (rr *RKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Flags @@ -535,12 +628,14 @@ func (rr *RKEY) len(off int, compression map[string]struct{}) int { l += base64.StdEncoding.DecodedLen(len(rr.PublicKey)) return l } + func (rr *RP) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Mbox, off+l, compression, false) l += domainNameLen(rr.Txt, off+l, compression, false) return l } + func (rr *RRSIG) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // TypeCovered @@ -554,12 +649,14 @@ func (rr *RRSIG) len(off int, compression map[string]struct{}) int { l += base64.StdEncoding.DecodedLen(len(rr.Signature)) return l } + func (rr *RT) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Preference l += domainNameLen(rr.Host, off+l, compression, false) return l } + func (rr *SMIMEA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Usage @@ -568,6 +665,7 @@ func (rr *SMIMEA) len(off int, compression map[string]struct{}) int { l += len(rr.Certificate) / 2 return l } + func (rr *SOA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Ns, off+l, compression, true) @@ -579,6 +677,7 @@ func (rr *SOA) len(off int, compression map[string]struct{}) int { l += 4 // Minttl 
return l } + func (rr *SPF) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) for _, x := range rr.Txt { @@ -586,6 +685,7 @@ func (rr *SPF) len(off int, compression map[string]struct{}) int { } return l } + func (rr *SRV) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Priority @@ -594,6 +694,7 @@ func (rr *SRV) len(off int, compression map[string]struct{}) int { l += domainNameLen(rr.Target, off+l, compression, false) return l } + func (rr *SSHFP) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Algorithm @@ -601,6 +702,7 @@ func (rr *SSHFP) len(off int, compression map[string]struct{}) int { l += len(rr.FingerPrint) / 2 return l } + func (rr *SVCB) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Priority @@ -610,6 +712,7 @@ func (rr *SVCB) len(off int, compression map[string]struct{}) int { } return l } + func (rr *TA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // KeyTag @@ -618,12 +721,14 @@ func (rr *TA) len(off int, compression map[string]struct{}) int { l += len(rr.Digest) / 2 return l } + func (rr *TALINK) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.PreviousName, off+l, compression, false) l += domainNameLen(rr.NextName, off+l, compression, false) return l } + func (rr *TKEY) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Algorithm, off+l, compression, false) @@ -637,6 +742,7 @@ func (rr *TKEY) len(off int, compression map[string]struct{}) int { l += len(rr.OtherData) / 2 return l } + func (rr *TLSA) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l++ // Usage @@ -645,6 +751,7 @@ func (rr *TLSA) len(off int, compression map[string]struct{}) int { l += len(rr.Certificate) / 2 
return l } + func (rr *TSIG) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += domainNameLen(rr.Algorithm, off+l, compression, false) @@ -658,6 +765,7 @@ func (rr *TSIG) len(off int, compression map[string]struct{}) int { l += len(rr.OtherData) / 2 return l } + func (rr *TXT) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) for _, x := range rr.Txt { @@ -665,16 +773,19 @@ func (rr *TXT) len(off int, compression map[string]struct{}) int { } return l } + func (rr *UID) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 4 // Uid return l } + func (rr *UINFO) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.Uinfo) + 1 return l } + func (rr *URI) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 2 // Priority @@ -682,11 +793,13 @@ func (rr *URI) len(off int, compression map[string]struct{}) int { l += len(rr.Target) return l } + func (rr *X25) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += len(rr.PSDNAddress) + 1 return l } + func (rr *ZONEMD) len(off int, compression map[string]struct{}) int { l := rr.Hdr.len(off, compression) l += 4 // Serial @@ -698,17 +811,31 @@ func (rr *ZONEMD) len(off int, compression map[string]struct{}) int { // copy() functions func (rr *A) copy() RR { - return &A{rr.Hdr, copyIP(rr.A)} + return &A{rr.Hdr, cloneSlice(rr.A)} } + func (rr *AAAA) copy() RR { - return &AAAA{rr.Hdr, copyIP(rr.AAAA)} + return &AAAA{rr.Hdr, cloneSlice(rr.AAAA)} } + func (rr *AFSDB) copy() RR { return &AFSDB{rr.Hdr, rr.Subtype, rr.Hostname} } + +func (rr *AMTRELAY) copy() RR { + return &AMTRELAY{ + rr.Hdr, + rr.Precedence, + rr.GatewayType, + cloneSlice(rr.GatewayAddr), + rr.GatewayHost, + } +} + func (rr *ANY) copy() RR { return &ANY{rr.Hdr} } + func (rr *APL) copy() RR { Prefixes := make([]APLPrefix, len(rr.Prefixes)) 
for i, e := range rr.Prefixes { @@ -716,150 +843,278 @@ func (rr *APL) copy() RR { } return &APL{rr.Hdr, Prefixes} } + func (rr *AVC) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &AVC{rr.Hdr, Txt} + return &AVC{rr.Hdr, cloneSlice(rr.Txt)} } + func (rr *CAA) copy() RR { - return &CAA{rr.Hdr, rr.Flag, rr.Tag, rr.Value} + return &CAA{ + rr.Hdr, + rr.Flag, + rr.Tag, + rr.Value, + } } + func (rr *CDNSKEY) copy() RR { return &CDNSKEY{*rr.DNSKEY.copy().(*DNSKEY)} } + func (rr *CDS) copy() RR { return &CDS{*rr.DS.copy().(*DS)} } + func (rr *CERT) copy() RR { - return &CERT{rr.Hdr, rr.Type, rr.KeyTag, rr.Algorithm, rr.Certificate} + return &CERT{ + rr.Hdr, + rr.Type, + rr.KeyTag, + rr.Algorithm, + rr.Certificate, + } } + func (rr *CNAME) copy() RR { return &CNAME{rr.Hdr, rr.Target} } + func (rr *CSYNC) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &CSYNC{rr.Hdr, rr.Serial, rr.Flags, TypeBitMap} + return &CSYNC{ + rr.Hdr, + rr.Serial, + rr.Flags, + cloneSlice(rr.TypeBitMap), + } } + func (rr *DHCID) copy() RR { return &DHCID{rr.Hdr, rr.Digest} } + func (rr *DLV) copy() RR { return &DLV{*rr.DS.copy().(*DS)} } + func (rr *DNAME) copy() RR { return &DNAME{rr.Hdr, rr.Target} } + func (rr *DNSKEY) copy() RR { - return &DNSKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} + return &DNSKEY{ + rr.Hdr, + rr.Flags, + rr.Protocol, + rr.Algorithm, + rr.PublicKey, + } } + func (rr *DS) copy() RR { - return &DS{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} + return &DS{ + rr.Hdr, + rr.KeyTag, + rr.Algorithm, + rr.DigestType, + rr.Digest, + } } + func (rr *EID) copy() RR { return &EID{rr.Hdr, rr.Endpoint} } + func (rr *EUI48) copy() RR { return &EUI48{rr.Hdr, rr.Address} } + func (rr *EUI64) copy() RR { return &EUI64{rr.Hdr, rr.Address} } + func (rr *GID) copy() RR { return &GID{rr.Hdr, rr.Gid} } + func (rr *GPOS) copy() RR { - return &GPOS{rr.Hdr, rr.Longitude, 
rr.Latitude, rr.Altitude} + return &GPOS{ + rr.Hdr, + rr.Longitude, + rr.Latitude, + rr.Altitude, + } } + func (rr *HINFO) copy() RR { return &HINFO{rr.Hdr, rr.Cpu, rr.Os} } + func (rr *HIP) copy() RR { - RendezvousServers := make([]string, len(rr.RendezvousServers)) - copy(RendezvousServers, rr.RendezvousServers) - return &HIP{rr.Hdr, rr.HitLength, rr.PublicKeyAlgorithm, rr.PublicKeyLength, rr.Hit, rr.PublicKey, RendezvousServers} + return &HIP{ + rr.Hdr, + rr.HitLength, + rr.PublicKeyAlgorithm, + rr.PublicKeyLength, + rr.Hit, + rr.PublicKey, + cloneSlice(rr.RendezvousServers), + } } + func (rr *HTTPS) copy() RR { return &HTTPS{*rr.SVCB.copy().(*SVCB)} } + +func (rr *IPSECKEY) copy() RR { + return &IPSECKEY{ + rr.Hdr, + rr.Precedence, + rr.GatewayType, + rr.Algorithm, + cloneSlice(rr.GatewayAddr), + rr.GatewayHost, + rr.PublicKey, + } +} + +func (rr *ISDN) copy() RR { + return &ISDN{rr.Hdr, rr.Address, rr.SubAddress} +} + func (rr *KEY) copy() RR { return &KEY{*rr.DNSKEY.copy().(*DNSKEY)} } + func (rr *KX) copy() RR { return &KX{rr.Hdr, rr.Preference, rr.Exchanger} } + func (rr *L32) copy() RR { - return &L32{rr.Hdr, rr.Preference, copyIP(rr.Locator32)} + return &L32{rr.Hdr, rr.Preference, cloneSlice(rr.Locator32)} } + func (rr *L64) copy() RR { return &L64{rr.Hdr, rr.Preference, rr.Locator64} } + func (rr *LOC) copy() RR { - return &LOC{rr.Hdr, rr.Version, rr.Size, rr.HorizPre, rr.VertPre, rr.Latitude, rr.Longitude, rr.Altitude} + return &LOC{ + rr.Hdr, + rr.Version, + rr.Size, + rr.HorizPre, + rr.VertPre, + rr.Latitude, + rr.Longitude, + rr.Altitude, + } } + func (rr *LP) copy() RR { return &LP{rr.Hdr, rr.Preference, rr.Fqdn} } + func (rr *MB) copy() RR { return &MB{rr.Hdr, rr.Mb} } + func (rr *MD) copy() RR { return &MD{rr.Hdr, rr.Md} } + func (rr *MF) copy() RR { return &MF{rr.Hdr, rr.Mf} } + func (rr *MG) copy() RR { return &MG{rr.Hdr, rr.Mg} } + func (rr *MINFO) copy() RR { return &MINFO{rr.Hdr, rr.Rmail, rr.Email} } + func (rr *MR) copy() RR { return 
&MR{rr.Hdr, rr.Mr} } + func (rr *MX) copy() RR { return &MX{rr.Hdr, rr.Preference, rr.Mx} } + func (rr *NAPTR) copy() RR { - return &NAPTR{rr.Hdr, rr.Order, rr.Preference, rr.Flags, rr.Service, rr.Regexp, rr.Replacement} + return &NAPTR{ + rr.Hdr, + rr.Order, + rr.Preference, + rr.Flags, + rr.Service, + rr.Regexp, + rr.Replacement, + } } + func (rr *NID) copy() RR { return &NID{rr.Hdr, rr.Preference, rr.NodeID} } + func (rr *NIMLOC) copy() RR { return &NIMLOC{rr.Hdr, rr.Locator} } + func (rr *NINFO) copy() RR { - ZSData := make([]string, len(rr.ZSData)) - copy(ZSData, rr.ZSData) - return &NINFO{rr.Hdr, ZSData} + return &NINFO{rr.Hdr, cloneSlice(rr.ZSData)} } + func (rr *NS) copy() RR { return &NS{rr.Hdr, rr.Ns} } + func (rr *NSAPPTR) copy() RR { return &NSAPPTR{rr.Hdr, rr.Ptr} } + func (rr *NSEC) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &NSEC{rr.Hdr, rr.NextDomain, TypeBitMap} + return &NSEC{rr.Hdr, rr.NextDomain, cloneSlice(rr.TypeBitMap)} } + func (rr *NSEC3) copy() RR { - TypeBitMap := make([]uint16, len(rr.TypeBitMap)) - copy(TypeBitMap, rr.TypeBitMap) - return &NSEC3{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt, rr.HashLength, rr.NextDomain, TypeBitMap} + return &NSEC3{ + rr.Hdr, + rr.Hash, + rr.Flags, + rr.Iterations, + rr.SaltLength, + rr.Salt, + rr.HashLength, + rr.NextDomain, + cloneSlice(rr.TypeBitMap), + } } + func (rr *NSEC3PARAM) copy() RR { - return &NSEC3PARAM{rr.Hdr, rr.Hash, rr.Flags, rr.Iterations, rr.SaltLength, rr.Salt} + return &NSEC3PARAM{ + rr.Hdr, + rr.Hash, + rr.Flags, + rr.Iterations, + rr.SaltLength, + rr.Salt, + } } + func (rr *NULL) copy() RR { return &NULL{rr.Hdr, rr.Data} } + +func (rr *NXT) copy() RR { + return &NXT{*rr.NSEC.copy().(*NSEC)} +} + func (rr *OPENPGPKEY) copy() RR { return &OPENPGPKEY{rr.Hdr, rr.PublicKey} } + func (rr *OPT) copy() RR { Option := make([]EDNS0, len(rr.Option)) for i, e := range rr.Option { @@ -867,86 +1122,205 @@ func (rr 
*OPT) copy() RR { } return &OPT{rr.Hdr, Option} } + func (rr *PTR) copy() RR { return &PTR{rr.Hdr, rr.Ptr} } + func (rr *PX) copy() RR { - return &PX{rr.Hdr, rr.Preference, rr.Map822, rr.Mapx400} + return &PX{ + rr.Hdr, + rr.Preference, + rr.Map822, + rr.Mapx400, + } } + func (rr *RFC3597) copy() RR { return &RFC3597{rr.Hdr, rr.Rdata} } + func (rr *RKEY) copy() RR { - return &RKEY{rr.Hdr, rr.Flags, rr.Protocol, rr.Algorithm, rr.PublicKey} + return &RKEY{ + rr.Hdr, + rr.Flags, + rr.Protocol, + rr.Algorithm, + rr.PublicKey, + } } + func (rr *RP) copy() RR { return &RP{rr.Hdr, rr.Mbox, rr.Txt} } + func (rr *RRSIG) copy() RR { - return &RRSIG{rr.Hdr, rr.TypeCovered, rr.Algorithm, rr.Labels, rr.OrigTtl, rr.Expiration, rr.Inception, rr.KeyTag, rr.SignerName, rr.Signature} + return &RRSIG{ + rr.Hdr, + rr.TypeCovered, + rr.Algorithm, + rr.Labels, + rr.OrigTtl, + rr.Expiration, + rr.Inception, + rr.KeyTag, + rr.SignerName, + rr.Signature, + } } + func (rr *RT) copy() RR { return &RT{rr.Hdr, rr.Preference, rr.Host} } + func (rr *SIG) copy() RR { return &SIG{*rr.RRSIG.copy().(*RRSIG)} } + func (rr *SMIMEA) copy() RR { - return &SMIMEA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} + return &SMIMEA{ + rr.Hdr, + rr.Usage, + rr.Selector, + rr.MatchingType, + rr.Certificate, + } } + func (rr *SOA) copy() RR { - return &SOA{rr.Hdr, rr.Ns, rr.Mbox, rr.Serial, rr.Refresh, rr.Retry, rr.Expire, rr.Minttl} + return &SOA{ + rr.Hdr, + rr.Ns, + rr.Mbox, + rr.Serial, + rr.Refresh, + rr.Retry, + rr.Expire, + rr.Minttl, + } } + func (rr *SPF) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &SPF{rr.Hdr, Txt} + return &SPF{rr.Hdr, cloneSlice(rr.Txt)} } + func (rr *SRV) copy() RR { - return &SRV{rr.Hdr, rr.Priority, rr.Weight, rr.Port, rr.Target} + return &SRV{ + rr.Hdr, + rr.Priority, + rr.Weight, + rr.Port, + rr.Target, + } } + func (rr *SSHFP) copy() RR { - return &SSHFP{rr.Hdr, rr.Algorithm, rr.Type, rr.FingerPrint} + return &SSHFP{ + rr.Hdr, + 
rr.Algorithm, + rr.Type, + rr.FingerPrint, + } } + func (rr *SVCB) copy() RR { Value := make([]SVCBKeyValue, len(rr.Value)) for i, e := range rr.Value { Value[i] = e.copy() } - return &SVCB{rr.Hdr, rr.Priority, rr.Target, Value} + return &SVCB{ + rr.Hdr, + rr.Priority, + rr.Target, + Value, + } } + func (rr *TA) copy() RR { - return &TA{rr.Hdr, rr.KeyTag, rr.Algorithm, rr.DigestType, rr.Digest} + return &TA{ + rr.Hdr, + rr.KeyTag, + rr.Algorithm, + rr.DigestType, + rr.Digest, + } } + func (rr *TALINK) copy() RR { return &TALINK{rr.Hdr, rr.PreviousName, rr.NextName} } + func (rr *TKEY) copy() RR { - return &TKEY{rr.Hdr, rr.Algorithm, rr.Inception, rr.Expiration, rr.Mode, rr.Error, rr.KeySize, rr.Key, rr.OtherLen, rr.OtherData} + return &TKEY{ + rr.Hdr, + rr.Algorithm, + rr.Inception, + rr.Expiration, + rr.Mode, + rr.Error, + rr.KeySize, + rr.Key, + rr.OtherLen, + rr.OtherData, + } } + func (rr *TLSA) copy() RR { - return &TLSA{rr.Hdr, rr.Usage, rr.Selector, rr.MatchingType, rr.Certificate} + return &TLSA{ + rr.Hdr, + rr.Usage, + rr.Selector, + rr.MatchingType, + rr.Certificate, + } } + func (rr *TSIG) copy() RR { - return &TSIG{rr.Hdr, rr.Algorithm, rr.TimeSigned, rr.Fudge, rr.MACSize, rr.MAC, rr.OrigId, rr.Error, rr.OtherLen, rr.OtherData} + return &TSIG{ + rr.Hdr, + rr.Algorithm, + rr.TimeSigned, + rr.Fudge, + rr.MACSize, + rr.MAC, + rr.OrigId, + rr.Error, + rr.OtherLen, + rr.OtherData, + } } + func (rr *TXT) copy() RR { - Txt := make([]string, len(rr.Txt)) - copy(Txt, rr.Txt) - return &TXT{rr.Hdr, Txt} + return &TXT{rr.Hdr, cloneSlice(rr.Txt)} } + func (rr *UID) copy() RR { return &UID{rr.Hdr, rr.Uid} } + func (rr *UINFO) copy() RR { return &UINFO{rr.Hdr, rr.Uinfo} } + func (rr *URI) copy() RR { - return &URI{rr.Hdr, rr.Priority, rr.Weight, rr.Target} + return &URI{ + rr.Hdr, + rr.Priority, + rr.Weight, + rr.Target, + } } + func (rr *X25) copy() RR { return &X25{rr.Hdr, rr.PSDNAddress} } + func (rr *ZONEMD) copy() RR { - return &ZONEMD{rr.Hdr, rr.Serial, 
rr.Scheme, rr.Hash, rr.Digest} + return &ZONEMD{ + rr.Hdr, + rr.Serial, + rr.Scheme, + rr.Hash, + rr.Digest, + } } diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go index 48d23f91..be01dec9 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_command.go @@ -32,6 +32,9 @@ func BuildGenerateCommand() command.Command { {Name: "template-data", KeyPath: "CustomTemplateData", UsageArgument: "template-data-file", Usage: "If specified, generate will use the contents of the file passed as data to be rendered in the test file template"}, + {Name: "tags", KeyPath: "Tags", + UsageArgument: "build-tags", + Usage: "If specified, generate will create a test file that uses the given build tags (i.e. `--tags e2e,!unit` will add `//go:build e2e,!unit`)"}, }, &conf, types.GinkgoFlagSections{}, @@ -59,6 +62,7 @@ You can also pass a of the form "file.go" and generate will emit "fil } type specData struct { + BuildTags string Package string Subject string PackageImportPath string @@ -93,6 +97,7 @@ func generateTestFileForSubject(subject string, conf GeneratorsConfig) { } data := specData{ + BuildTags: getBuildTags(conf.Tags), Package: determinePackageName(packageName, conf.Internal), Subject: formattedName, PackageImportPath: getPackageImportPath(), diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go index c3470adb..4dab07d0 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generate_templates.go @@ -1,6 +1,7 @@ package generators -var specText = `package {{.Package}} +var specText = `{{.BuildTags}} +package {{.Package}} import ( {{.GinkgoImport}} @@ -14,7 +15,8 @@ var _ = 
{{.GinkgoPackage}}Describe("{{.Subject}}", func() { }) ` -var agoutiSpecText = `package {{.Package}} +var agoutiSpecText = `{{.BuildTags}} +package {{.Package}} import ( {{.GinkgoImport}} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go index 3046a448..28c7aa6f 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/generators/generators_common.go @@ -1,6 +1,7 @@ package generators import ( + "fmt" "go/build" "os" "path/filepath" @@ -14,6 +15,7 @@ type GeneratorsConfig struct { Agouti, NoDot, Internal bool CustomTemplate string CustomTemplateData string + Tags string } func getPackageAndFormattedName() (string, string, string) { @@ -62,3 +64,13 @@ func determinePackageName(name string, internal bool) string { return name + "_test" } + +// getBuildTags returns the resultant string to be added. +// If the input string is not empty, then returns a `//go:build {}` string, +// otherwise returns an empty string. +func getBuildTags(tags string) string { + if tags != "" { + return fmt.Sprintf("//go:build %s\n", tags) + } + return "" +} diff --git a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go index 0b9b19fe..958daccb 100644 --- a/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go +++ b/vendor/github.com/onsi/ginkgo/v2/ginkgo/outline/ginkgo.go @@ -244,9 +244,7 @@ func labelFromCallExpr(ce *ast.CallExpr) []string { } if id.Name == "Label" { ls := extractLabels(expr) - for _, label := range ls { - labels = append(labels, label) - } + labels = append(labels, ls...) 
} } } diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go index 7f96c450..be506f9b 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/json_report.go @@ -4,12 +4,16 @@ import ( "encoding/json" "fmt" "os" + "path" "github.com/onsi/ginkgo/v2/types" ) -//GenerateJSONReport produces a JSON-formatted report at the passed in destination +// GenerateJSONReport produces a JSON-formatted report at the passed in destination func GenerateJSONReport(report types.Report, destination string) error { + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return err + } f, err := os.Create(destination) if err != nil { return err @@ -25,8 +29,8 @@ func GenerateJSONReport(report types.Report, destination string) error { return f.Close() } -//MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources -//It skips over reports that fail to decode but reports on them via the returned messages []string +// MergeJSONReports produces a single JSON-formatted report at the passed in destination by merging the JSON-formatted reports provided in sources +// It skips over reports that fail to decode but reports on them via the returned messages []string func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, error) { messages := []string{} allReports := []types.Report{} @@ -46,6 +50,9 @@ func MergeAndCleanupJSONReports(sources []string, destination string) ([]string, allReports = append(allReports, reports...) 
} + if err := os.MkdirAll(path.Dir(destination), 0770); err != nil { + return messages, err + } f, err := os.Create(destination) if err != nil { return messages, err diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go index 592d7f61..81604220 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/junit_report.go @@ -14,6 +14,7 @@ import ( "encoding/xml" "fmt" "os" + "path" "strings" "github.com/onsi/ginkgo/v2/config" @@ -285,6 +286,9 @@ func GenerateJUnitReportWithConfig(report types.Report, dst string, config Junit TestSuites: []JUnitTestSuite{suite}, } + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return err + } f, err := os.Create(dst) if err != nil { return err @@ -322,6 +326,9 @@ func MergeAndCleanupJUnitReports(sources []string, dst string) ([]string, error) mergedReport.TestSuites = append(mergedReport.TestSuites, report.TestSuites...) 
} + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return messages, err + } f, err := os.Create(dst) if err != nil { return messages, err diff --git a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go index c1863496..e990ad82 100644 --- a/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go +++ b/vendor/github.com/onsi/ginkgo/v2/reporters/teamcity_report.go @@ -11,6 +11,7 @@ package reporters import ( "fmt" "os" + "path" "strings" "github.com/onsi/ginkgo/v2/types" @@ -27,6 +28,9 @@ func tcEscape(s string) string { } func GenerateTeamcityReport(report types.Report, dst string) error { + if err := os.MkdirAll(path.Dir(dst), 0770); err != nil { + return err + } f, err := os.Create(dst) if err != nil { return err diff --git a/vendor/github.com/onsi/ginkgo/v2/types/config.go b/vendor/github.com/onsi/ginkgo/v2/types/config.go index 1014c7b4..c88fc85a 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/config.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/config.go @@ -27,6 +27,7 @@ type SuiteConfig struct { FailOnPending bool FailFast bool FlakeAttempts int + MustPassRepeatedly int DryRun bool PollProgressAfter time.Duration PollProgressInterval time.Duration diff --git a/vendor/github.com/onsi/ginkgo/v2/types/errors.go b/vendor/github.com/onsi/ginkgo/v2/types/errors.go index 1e0dbfd9..4fbdc3e9 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/errors.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/errors.go @@ -453,8 +453,8 @@ func (g ginkgoErrors) InvalidEntryDescription(cl CodeLocation) error { func (g ginkgoErrors) MissingParametersForTableFunction(cl CodeLocation) error { return GinkgoError{ - Heading: fmt.Sprintf("No parameters have been passed to the Table Function"), - Message: fmt.Sprintf("The Table Function expected at least 1 parameter"), + Heading: "No parameters have been passed to the Table Function", + Message: "The Table Function expected at least 1 
parameter", CodeLocation: cl, DocLink: "table-specs", } diff --git a/vendor/github.com/onsi/ginkgo/v2/types/types.go b/vendor/github.com/onsi/ginkgo/v2/types/types.go index d048a8ad..aae69b04 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/types.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/types.go @@ -97,9 +97,7 @@ func (report Report) Add(other Report) Report { report.RunTime = report.EndTime.Sub(report.StartTime) reports := make(SpecReports, len(report.SpecReports)+len(other.SpecReports)) - for i := range report.SpecReports { - reports[i] = report.SpecReports[i] - } + copy(reports, report.SpecReports) offset := len(report.SpecReports) for i := range other.SpecReports { reports[i+offset] = other.SpecReports[i] diff --git a/vendor/github.com/onsi/ginkgo/v2/types/version.go b/vendor/github.com/onsi/ginkgo/v2/types/version.go index 43066341..a37f3082 100644 --- a/vendor/github.com/onsi/ginkgo/v2/types/version.go +++ b/vendor/github.com/onsi/ginkgo/v2/types/version.go @@ -1,3 +1,3 @@ package types -const VERSION = "2.9.5" +const VERSION = "2.13.0" diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go index cee360db..2f154907 100644 --- a/vendor/github.com/prometheus/client_model/go/metrics.pb.go +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -483,6 +483,8 @@ type Histogram struct { // histograms. PositiveDelta []int64 `protobuf:"zigzag64,13,rep,name=positive_delta,json=positiveDelta" json:"positive_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket). PositiveCount []float64 `protobuf:"fixed64,14,rep,name=positive_count,json=positiveCount" json:"positive_count,omitempty"` // Absolute count of each bucket. + // Only used for native histograms. These exemplars MUST have a timestamp. 
+ Exemplars []*Exemplar `protobuf:"bytes,16,rep,name=exemplars" json:"exemplars,omitempty"` } func (x *Histogram) Reset() { @@ -622,6 +624,13 @@ func (x *Histogram) GetPositiveCount() []float64 { return nil } +func (x *Histogram) GetExemplars() []*Exemplar { + if x != nil { + return x.Exemplars + } + return nil +} + // A Bucket of a conventional histogram, each of which is treated as // an individual counter-like time series by Prometheus. type Bucket struct { @@ -923,6 +932,7 @@ type MetricFamily struct { Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` Metric []*Metric `protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + Unit *string `protobuf:"bytes,5,opt,name=unit" json:"unit,omitempty"` } func (x *MetricFamily) Reset() { @@ -985,6 +995,13 @@ func (x *MetricFamily) GetMetric() []*Metric { return nil } +func (x *MetricFamily) GetUnit() string { + if x != nil && x.Unit != nil { + return *x.Unit + } + return "" +} + var File_io_prometheus_client_metrics_proto protoreflect.FileDescriptor var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ @@ -1028,7 +1045,7 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x05, 0x0a, 0x09, 0x48, + 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xea, 0x05, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 
0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, @@ -1071,79 +1088,84 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{ 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, - 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, - 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, - 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, - 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, - 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, - 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, - 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, - 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, - 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, - 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, - 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, - 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, - 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, - 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 
0x6e, 0x67, - 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, - 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, + 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x09, 0x65, 0x78, 0x65, + 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x18, 0x10, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, + 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, + 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x09, 0x65, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x73, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, + 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, + 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, + 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, + 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x14, 0x63, + 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, + 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, + 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65, 0x72, 0x42, + 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, + 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, + 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, + 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, + 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, + 0x0a, 0x06, 0x6f, 0x66, 
0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, + 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, + 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, + 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, + 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, + 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, + 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, + 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, - 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, - 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, - 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, - 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, - 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 
0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, - 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, - 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, - 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, - 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, + 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18, 0x02, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, + 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, + 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, + 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, + 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, + 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, + 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, + 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, + 0x79, 
0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, - 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20, - 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, - 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61, - 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75, - 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, - 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, - 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, - 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, - 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, - 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, - 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, - 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, - 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, - 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, - 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 
0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, - 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, - 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, - 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, - 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, - 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, - 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, 0x62, 0x0a, 0x0a, 0x4d, - 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x55, - 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10, - 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0b, - 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x48, - 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x41, - 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x05, 0x42, - 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, - 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, - 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63, - 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, 0x3b, 0x69, - 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x69, - 0x65, 0x6e, 0x74, + 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, + 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, + 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 
0x70, 0x72, 0x6f, 0x6d, 0x65, + 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, + 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, + 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, + 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, + 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xb6, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x46, + 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, + 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, + 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, + 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, + 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52, 0x04, 0x74, + 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18, 0x04, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, + 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, + 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12, 0x12, 0x0a, 0x04, 0x75, 0x6e, 0x69, + 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x75, 0x6e, 0x69, 0x74, 0x2a, 0x62, 0x0a, + 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, + 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, + 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, + 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, + 0x09, 
0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, + 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, + 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, + 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, + 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, + 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, + 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, + 0x6c, 0x69, 0x65, 0x6e, 0x74, } var ( @@ -1185,22 +1207,23 @@ var file_io_prometheus_client_metrics_proto_depIdxs = []int32{ 13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp 9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan 9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan - 10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar - 1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair - 13, // 10: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp - 1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair - 2, // 12: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge - 3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter - 5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary - 6, // 15: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped - 7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram - 0, // 17: io.prometheus.client.MetricFamily.type:type_name -> 
io.prometheus.client.MetricType - 11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric - 19, // [19:19] is the sub-list for method output_type - 19, // [19:19] is the sub-list for method input_type - 19, // [19:19] is the sub-list for extension type_name - 19, // [19:19] is the sub-list for extension extendee - 0, // [0:19] is the sub-list for field type_name + 10, // 8: io.prometheus.client.Histogram.exemplars:type_name -> io.prometheus.client.Exemplar + 10, // 9: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar + 1, // 10: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair + 13, // 11: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp + 1, // 12: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair + 2, // 13: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge + 3, // 14: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter + 5, // 15: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary + 6, // 16: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped + 7, // 17: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram + 0, // 18: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType + 11, // 19: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric + 20, // [20:20] is the sub-list for method output_type + 20, // [20:20] is the sub-list for method input_type + 20, // [20:20] is the sub-list for extension type_name + 20, // [20:20] is the sub-list for extension extendee + 0, // [0:20] is the sub-list for field type_name } func init() { file_io_prometheus_client_metrics_proto_init() } diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go index b2b89b01..25cfaa21 100644 
--- a/vendor/github.com/prometheus/common/expfmt/decode.go +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -75,14 +75,14 @@ func ResponseFormat(h http.Header) Format { func NewDecoder(r io.Reader, format Format) Decoder { switch format.FormatType() { case TypeProtoDelim: - return &protoDecoder{r: r} + return &protoDecoder{r: bufio.NewReader(r)} } return &textDecoder{r: r} } // protoDecoder implements the Decoder interface for protocol buffers. type protoDecoder struct { - r io.Reader + r protodelim.Reader } // Decode implements the Decoder interface. @@ -90,7 +90,7 @@ func (d *protoDecoder) Decode(v *dto.MetricFamily) error { opts := protodelim.UnmarshalOptions{ MaxSize: -1, } - if err := opts.UnmarshalFrom(bufio.NewReader(d.r), v); err != nil { + if err := opts.UnmarshalFrom(d.r, v); err != nil { return err } if !model.IsValidMetricName(model.LabelValue(v.GetName())) { diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go b/vendor/github.com/prometheus/common/expfmt/encode.go index 8fd80618..7f6cbe7d 100644 --- a/vendor/github.com/prometheus/common/expfmt/encode.go +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -139,7 +139,13 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format { // interface is kept for backwards compatibility. // In cases where the Format does not allow for UTF-8 names, the global // NameEscapingScheme will be applied. -func NewEncoder(w io.Writer, format Format) Encoder { +// +// NewEncoder can be called with additional options to customize the OpenMetrics text output. +// For example: +// NewEncoder(w, FmtOpenMetrics_1_0_0, WithCreatedLines()) +// +// Extra options are ignored for all other formats. 
+func NewEncoder(w io.Writer, format Format, options ...EncoderOption) Encoder { escapingScheme := format.ToEscapingScheme() switch format.FormatType() { @@ -178,7 +184,7 @@ func NewEncoder(w io.Writer, format Format) Encoder { case TypeOpenMetrics: return encoderCloser{ encode: func(v *dto.MetricFamily) error { - _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme)) + _, err := MetricFamilyToOpenMetrics(w, model.EscapeMetricFamily(v, escapingScheme), options...) return err }, close: func() error { diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go index 6fc9555e..051b38cd 100644 --- a/vendor/github.com/prometheus/common/expfmt/expfmt.go +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -15,6 +15,7 @@ package expfmt import ( + "fmt" "strings" "github.com/prometheus/common/model" @@ -63,7 +64,7 @@ const ( type FormatType int const ( - TypeUnknown = iota + TypeUnknown FormatType = iota TypeProtoCompact TypeProtoDelim TypeProtoText @@ -73,7 +74,8 @@ const ( // NewFormat generates a new Format from the type provided. Mostly used for // tests, most Formats should be generated as part of content negotiation in -// encode.go. +// encode.go. If a type has more than one version, the latest version will be +// returned. func NewFormat(t FormatType) Format { switch t { case TypeProtoCompact: @@ -91,13 +93,21 @@ func NewFormat(t FormatType) Format { } } +// NewOpenMetricsFormat generates a new OpenMetrics format matching the +// specified version number. +func NewOpenMetricsFormat(version string) (Format, error) { + if version == OpenMetricsVersion_0_0_1 { + return fmtOpenMetrics_0_0_1, nil + } + if version == OpenMetricsVersion_1_0_0 { + return fmtOpenMetrics_1_0_0, nil + } + return fmtUnknown, fmt.Errorf("unknown open metrics version string") +} + // FormatType deduces an overall FormatType for the given format. 
func (f Format) FormatType() FormatType { toks := strings.Split(string(f), ";") - if len(toks) < 2 { - return TypeUnknown - } - params := make(map[string]string) for i, t := range toks { if i == 0 { diff --git a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go index 5622578e..353c5e93 100644 --- a/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go +++ b/vendor/github.com/prometheus/common/expfmt/openmetrics_create.go @@ -22,11 +22,47 @@ import ( "strconv" "strings" + "google.golang.org/protobuf/types/known/timestamppb" + "github.com/prometheus/common/model" dto "github.com/prometheus/client_model/go" ) +type encoderOption struct { + withCreatedLines bool + withUnit bool +} + +type EncoderOption func(*encoderOption) + +// WithCreatedLines is an EncoderOption that configures the OpenMetrics encoder +// to include _created lines (See +// https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#counter-1). +// Created timestamps can improve the accuracy of series reset detection, but +// come with a bandwidth cost. +// +// At the time of writing, created timestamp ingestion is still experimental in +// Prometheus and need to be enabled with the feature-flag +// `--feature-flag=created-timestamp-zero-ingestion`, and breaking changes are +// still possible. Therefore, it is recommended to use this feature with caution. +func WithCreatedLines() EncoderOption { + return func(t *encoderOption) { + t.withCreatedLines = true + } +} + +// WithUnit is an EncoderOption enabling a set unit to be written to the output +// and to be added to the metric name, if it's not there already, as a suffix. +// Without opting in this way, the unit will not be added to the metric name and, +// on top of that, the unit will not be passed onto the output, even if it +// were declared in the *dto.MetricFamily struct, i.e. even if in.Unit !=nil. 
+func WithUnit() EncoderOption { + return func(t *encoderOption) { + t.withUnit = true + } +} + // MetricFamilyToOpenMetrics converts a MetricFamily proto message into the // OpenMetrics text format and writes the resulting lines to 'out'. It returns // the number of bytes written and any error encountered. The output will have @@ -59,20 +95,34 @@ import ( // Prometheus to OpenMetrics or vice versa: // // - Counters are expected to have the `_total` suffix in their metric name. In -// the output, the suffix will be truncated from the `# TYPE` and `# HELP` -// line. A counter with a missing `_total` suffix is not an error. However, +// the output, the suffix will be truncated from the `# TYPE`, `# HELP` and `# UNIT` +// lines. A counter with a missing `_total` suffix is not an error. However, // its type will be set to `unknown` in that case to avoid invalid OpenMetrics // output. // -// - No support for the following (optional) features: `# UNIT` line, `_created` -// line, info type, stateset type, gaugehistogram type. +// - According to the OM specs, the `# UNIT` line is optional, but if populated, +// the unit has to be present in the metric name as its suffix: +// (see https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#unit). +// However, in order to accommodate any potential scenario where such a change in the +// metric name is not desirable, the users are here given the choice of either explicitly +// opt in, in case they wish for the unit to be included in the output AND in the metric name +// as a suffix (see the description of the WithUnit function above), +// or not to opt in, in case they don't want for any of that to happen. +// +// - No support for the following (optional) features: info type, +// stateset type, gaugehistogram type. // // - The size of exemplar labels is not checked (i.e. it's possible to create // exemplars that are larger than allowed by the OpenMetrics specification). 
// // - The value of Counters is not checked. (OpenMetrics doesn't allow counters // with a `NaN` value.) -func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int, err error) { +func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily, options ...EncoderOption) (written int, err error) { + toOM := encoderOption{} + for _, option := range options { + option(&toOM) + } + name := in.GetName() if name == "" { return 0, fmt.Errorf("MetricFamily has no name: %s", in) @@ -95,12 +145,15 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } var ( - n int - metricType = in.GetType() - shortName = name + n int + metricType = in.GetType() + compliantName = name ) - if metricType == dto.MetricType_COUNTER && strings.HasSuffix(shortName, "_total") { - shortName = name[:len(name)-6] + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(compliantName, "_total") { + compliantName = name[:len(name)-6] + } + if toOM.withUnit && in.Unit != nil && !strings.HasSuffix(compliantName, fmt.Sprintf("_%s", *in.Unit)) { + compliantName = compliantName + fmt.Sprintf("_%s", *in.Unit) } // Comments, first HELP, then TYPE. 
@@ -110,7 +163,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = writeName(w, shortName) + n, err = writeName(w, compliantName) written += n if err != nil { return @@ -136,7 +189,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } - n, err = writeName(w, shortName) + n, err = writeName(w, compliantName) written += n if err != nil { return @@ -163,55 +216,89 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int if err != nil { return } + if toOM.withUnit && in.Unit != nil { + n, err = w.WriteString("# UNIT ") + written += n + if err != nil { + return + } + n, err = writeName(w, compliantName) + written += n + if err != nil { + return + } + + err = w.WriteByte(' ') + written++ + if err != nil { + return + } + n, err = writeEscapedString(w, *in.Unit, true) + written += n + if err != nil { + return + } + err = w.WriteByte('\n') + written++ + if err != nil { + return + } + } + + var createdTsBytesWritten int // Finally the samples, one line for each. + if metricType == dto.MetricType_COUNTER && strings.HasSuffix(name, "_total") { + compliantName = compliantName + "_total" + } for _, metric := range in.Metric { switch metricType { case dto.MetricType_COUNTER: if metric.Counter == nil { return written, fmt.Errorf( - "expected counter in metric %s %s", name, metric, + "expected counter in metric %s %s", compliantName, metric, ) } - // Note that we have ensured above that either the name - // ends on `_total` or that the rendered type is - // `unknown`. Therefore, no `_total` must be added here. 
n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Counter.GetValue(), 0, false, metric.Counter.Exemplar, ) + if toOM.withCreatedLines && metric.Counter.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "_total", metric, "", 0, metric.Counter.GetCreatedTimestamp()) + n += createdTsBytesWritten + } case dto.MetricType_GAUGE: if metric.Gauge == nil { return written, fmt.Errorf( - "expected gauge in metric %s %s", name, metric, + "expected gauge in metric %s %s", compliantName, metric, ) } n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Gauge.GetValue(), 0, false, nil, ) case dto.MetricType_UNTYPED: if metric.Untyped == nil { return written, fmt.Errorf( - "expected untyped in metric %s %s", name, metric, + "expected untyped in metric %s %s", compliantName, metric, ) } n, err = writeOpenMetricsSample( - w, name, "", metric, "", 0, + w, compliantName, "", metric, "", 0, metric.Untyped.GetValue(), 0, false, nil, ) case dto.MetricType_SUMMARY: if metric.Summary == nil { return written, fmt.Errorf( - "expected summary in metric %s %s", name, metric, + "expected summary in metric %s %s", compliantName, metric, ) } for _, q := range metric.Summary.Quantile { n, err = writeOpenMetricsSample( - w, name, "", metric, + w, compliantName, "", metric, model.QuantileLabel, q.GetQuantile(), q.GetValue(), 0, false, nil, @@ -222,7 +309,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } } n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, + w, compliantName, "_sum", metric, "", 0, metric.Summary.GetSampleSum(), 0, false, nil, ) @@ -231,20 +318,24 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int return } n, err = writeOpenMetricsSample( - w, name, "_count", metric, "", 0, + w, compliantName, "_count", metric, "", 0, 0, 
metric.Summary.GetSampleCount(), true, nil, ) + if toOM.withCreatedLines && metric.Summary.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Summary.GetCreatedTimestamp()) + n += createdTsBytesWritten + } case dto.MetricType_HISTOGRAM: if metric.Histogram == nil { return written, fmt.Errorf( - "expected histogram in metric %s %s", name, metric, + "expected histogram in metric %s %s", compliantName, metric, ) } infSeen := false for _, b := range metric.Histogram.Bucket { n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, + w, compliantName, "_bucket", metric, model.BucketLabel, b.GetUpperBound(), 0, b.GetCumulativeCount(), true, b.Exemplar, @@ -259,7 +350,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } if !infSeen { n, err = writeOpenMetricsSample( - w, name, "_bucket", metric, + w, compliantName, "_bucket", metric, model.BucketLabel, math.Inf(+1), 0, metric.Histogram.GetSampleCount(), true, nil, @@ -270,7 +361,7 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int } } n, err = writeOpenMetricsSample( - w, name, "_sum", metric, "", 0, + w, compliantName, "_sum", metric, "", 0, metric.Histogram.GetSampleSum(), 0, false, nil, ) @@ -279,13 +370,17 @@ func MetricFamilyToOpenMetrics(out io.Writer, in *dto.MetricFamily) (written int return } n, err = writeOpenMetricsSample( - w, name, "_count", metric, "", 0, + w, compliantName, "_count", metric, "", 0, 0, metric.Histogram.GetSampleCount(), true, nil, ) + if toOM.withCreatedLines && metric.Histogram.CreatedTimestamp != nil { + createdTsBytesWritten, err = writeOpenMetricsCreated(w, compliantName, "", metric, "", 0, metric.Histogram.GetCreatedTimestamp()) + n += createdTsBytesWritten + } default: return written, fmt.Errorf( - "unexpected type in metric %s %s", name, metric, + "unexpected type in metric %s %s", compliantName, metric, ) } written += n @@ -350,7 +445,7 
@@ func writeOpenMetricsSample( return written, err } } - if exemplar != nil { + if exemplar != nil && len(exemplar.Label) > 0 { n, err = writeExemplar(w, exemplar) written += n if err != nil { @@ -473,6 +568,49 @@ func writeOpenMetricsNameAndLabelPairs( return written, nil } +// writeOpenMetricsCreated writes the created timestamp for a single time series +// following OpenMetrics text format to w, given the metric name, the metric proto +// message itself, optionally a suffix to be removed, e.g. '_total' for counters, +// an additional label name with a float64 value (use empty string as label name if +// not required) and the timestamp that represents the created timestamp. +// The function returns the number of bytes written and any error encountered. +func writeOpenMetricsCreated(w enhancedWriter, + name, suffixToTrim string, metric *dto.Metric, + additionalLabelName string, additionalLabelValue float64, + createdTimestamp *timestamppb.Timestamp, +) (int, error) { + written := 0 + n, err := writeOpenMetricsNameAndLabelPairs( + w, strings.TrimSuffix(name, suffixToTrim)+"_created", metric.Label, additionalLabelName, additionalLabelValue, + ) + written += n + if err != nil { + return written, err + } + + err = w.WriteByte(' ') + written++ + if err != nil { + return written, err + } + + // TODO(beorn7): Format this directly from components of ts to + // avoid overflow/underflow and precision issues of the float + // conversion. + n, err = writeOpenMetricsFloat(w, float64(createdTimestamp.AsTime().UnixNano())/1e9) + written += n + if err != nil { + return written, err + } + + err = w.WriteByte('\n') + written++ + if err != nil { + return written, err + } + return written, nil +} + // writeExemplar writes the provided exemplar in OpenMetrics format to w. The // function returns the number of bytes written and any error encountered. 
func writeExemplar(w enhancedWriter, e *dto.Exemplar) (int, error) { diff --git a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go index 178fdbaf..80d1fe94 100644 --- a/vendor/github.com/prometheus/common/model/alert.go +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -75,7 +75,12 @@ func (a *Alert) ResolvedAt(ts time.Time) bool { // Status returns the status of the alert. func (a *Alert) Status() AlertStatus { - if a.Resolved() { + return a.StatusAt(time.Now()) +} + +// StatusAt returns the status of the alert at the given timestamp. +func (a *Alert) StatusAt(ts time.Time) AlertStatus { + if a.ResolvedAt(ts) { return AlertResolved } return AlertFiring @@ -127,6 +132,17 @@ func (as Alerts) HasFiring() bool { return false } +// HasFiringAt returns true iff one of the alerts is not resolved +// at the time ts. +func (as Alerts) HasFiringAt(ts time.Time) bool { + for _, a := range as { + if !a.ResolvedAt(ts) { + return true + } + } + return false +} + // Status returns StatusFiring iff at least one of the alerts is firing. func (as Alerts) Status() AlertStatus { if as.HasFiring() { @@ -134,3 +150,12 @@ func (as Alerts) Status() AlertStatus { } return AlertResolved } + +// StatusAt returns StatusFiring iff at least one of the alerts is firing +// at the time ts. +func (as Alerts) StatusAt(ts time.Time) AlertStatus { + if as.HasFiringAt(ts) { + return AlertFiring + } + return AlertResolved +} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go index 6eda08a7..d0ad88da 100644 --- a/vendor/github.com/prometheus/common/model/labelset.go +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -17,7 +17,6 @@ import ( "encoding/json" "fmt" "sort" - "strings" ) // A LabelSet is a collection of LabelName and LabelValue pairs. 
The LabelSet @@ -129,16 +128,6 @@ func (l LabelSet) Merge(other LabelSet) LabelSet { return result } -func (l LabelSet) String() string { - lstrs := make([]string, 0, len(l)) - for l, v := range l { - lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) - } - - sort.Strings(lstrs) - return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) -} - // Fingerprint returns the LabelSet's fingerprint. func (ls LabelSet) Fingerprint() Fingerprint { return labelSetToFingerprint(ls) diff --git a/vendor/github.com/prometheus/common/model/labelset_string.go b/vendor/github.com/prometheus/common/model/labelset_string.go new file mode 100644 index 00000000..481c47b4 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset_string.go @@ -0,0 +1,45 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build go1.21 + +package model + +import ( + "bytes" + "slices" + "strconv" +) + +// String will look like `{foo="bar", more="less"}`. Names are sorted alphabetically. +func (l LabelSet) String() string { + var lna [32]string // On stack to avoid memory allocation for sorting names. + labelNames := lna[:0] + for name := range l { + labelNames = append(labelNames, string(name)) + } + slices.Sort(labelNames) + var bytea [1024]byte // On stack to avoid memory allocation while building the output. 
+ b := bytes.NewBuffer(bytea[:0]) + b.WriteByte('{') + for i, name := range labelNames { + if i > 0 { + b.WriteString(", ") + } + b.WriteString(name) + b.WriteByte('=') + b.Write(strconv.AppendQuote(b.AvailableBuffer(), string(l[LabelName(name)]))) + } + b.WriteByte('}') + return b.String() +} diff --git a/vendor/github.com/prometheus/common/model/labelset_string_go120.go b/vendor/github.com/prometheus/common/model/labelset_string_go120.go new file mode 100644 index 00000000..c4212685 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset_string_go120.go @@ -0,0 +1,39 @@ +// Copyright 2024 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//go:build !go1.21 + +package model + +import ( + "fmt" + "sort" + "strings" +) + +// String was optimized using functions not available for go 1.20 +// or lower. We keep the old implementation for compatibility with client_golang. +// Once client golang drops support for go 1.20 (scheduled for August 2024), this +// file can be removed. 
+func (l LabelSet) String() string { + labelNames := make([]string, 0, len(l)) + for name := range l { + labelNames = append(labelNames, string(name)) + } + sort.Strings(labelNames) + lstrs := make([]string, 0, len(l)) + for _, name := range labelNames { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", name, l[LabelName(name)])) + } + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go index 0bd29b3a..eb865e5a 100644 --- a/vendor/github.com/prometheus/common/model/metric.go +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -204,6 +204,7 @@ func EscapeMetricFamily(v *dto.MetricFamily, scheme EscapingScheme) *dto.MetricF out := &dto.MetricFamily{ Help: v.Help, Type: v.Type, + Unit: v.Unit, } // If the name is nil, copy as-is, don't try to escape. diff --git a/vendor/golang.org/x/net/nettest/conntest.go b/vendor/golang.org/x/net/nettest/conntest.go new file mode 100644 index 00000000..615f4980 --- /dev/null +++ b/vendor/golang.org/x/net/nettest/conntest.go @@ -0,0 +1,468 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +import ( + "bytes" + "encoding/binary" + "io" + "io/ioutil" + "math/rand" + "net" + "runtime" + "sync" + "testing" + "time" +) + +// MakePipe creates a connection between two endpoints and returns the pair +// as c1 and c2, such that anything written to c1 is read by c2 and vice-versa. +// The stop function closes all resources, including c1, c2, and the underlying +// net.Listener (if there is one), and should not be nil. +type MakePipe func() (c1, c2 net.Conn, stop func(), err error) + +// TestConn tests that a net.Conn implementation properly satisfies the interface. +// The tests should not produce any false positives, but may experience +// false negatives. 
Thus, some issues may only be detected when the test is +// run multiple times. For maximal effectiveness, run the tests under the +// race detector. +func TestConn(t *testing.T, mp MakePipe) { + t.Run("BasicIO", func(t *testing.T) { timeoutWrapper(t, mp, testBasicIO) }) + t.Run("PingPong", func(t *testing.T) { timeoutWrapper(t, mp, testPingPong) }) + t.Run("RacyRead", func(t *testing.T) { timeoutWrapper(t, mp, testRacyRead) }) + t.Run("RacyWrite", func(t *testing.T) { timeoutWrapper(t, mp, testRacyWrite) }) + t.Run("ReadTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testReadTimeout) }) + t.Run("WriteTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testWriteTimeout) }) + t.Run("PastTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testPastTimeout) }) + t.Run("PresentTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testPresentTimeout) }) + t.Run("FutureTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testFutureTimeout) }) + t.Run("CloseTimeout", func(t *testing.T) { timeoutWrapper(t, mp, testCloseTimeout) }) + t.Run("ConcurrentMethods", func(t *testing.T) { timeoutWrapper(t, mp, testConcurrentMethods) }) +} + +type connTester func(t *testing.T, c1, c2 net.Conn) + +func timeoutWrapper(t *testing.T, mp MakePipe, f connTester) { + t.Helper() + c1, c2, stop, err := mp() + if err != nil { + t.Fatalf("unable to make pipe: %v", err) + } + var once sync.Once + defer once.Do(func() { stop() }) + timer := time.AfterFunc(time.Minute, func() { + once.Do(func() { + t.Error("test timed out; terminating pipe") + stop() + }) + }) + defer timer.Stop() + f(t, c1, c2) +} + +// testBasicIO tests that the data sent on c1 is properly received on c2. 
+func testBasicIO(t *testing.T, c1, c2 net.Conn) { + want := make([]byte, 1<<20) + rand.New(rand.NewSource(0)).Read(want) + + dataCh := make(chan []byte) + go func() { + rd := bytes.NewReader(want) + if err := chunkedCopy(c1, rd); err != nil { + t.Errorf("unexpected c1.Write error: %v", err) + } + if err := c1.Close(); err != nil { + t.Errorf("unexpected c1.Close error: %v", err) + } + }() + + go func() { + wr := new(bytes.Buffer) + if err := chunkedCopy(wr, c2); err != nil { + t.Errorf("unexpected c2.Read error: %v", err) + } + if err := c2.Close(); err != nil { + t.Errorf("unexpected c2.Close error: %v", err) + } + dataCh <- wr.Bytes() + }() + + if got := <-dataCh; !bytes.Equal(got, want) { + t.Error("transmitted data differs") + } +} + +// testPingPong tests that the two endpoints can synchronously send data to +// each other in a typical request-response pattern. +func testPingPong(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + defer wg.Wait() + + pingPonger := func(c net.Conn) { + defer wg.Done() + buf := make([]byte, 8) + var prev uint64 + for { + if _, err := io.ReadFull(c, buf); err != nil { + if err == io.EOF { + break + } + t.Errorf("unexpected Read error: %v", err) + } + + v := binary.LittleEndian.Uint64(buf) + binary.LittleEndian.PutUint64(buf, v+1) + if prev != 0 && prev+2 != v { + t.Errorf("mismatching value: got %d, want %d", v, prev+2) + } + prev = v + if v == 1000 { + break + } + + if _, err := c.Write(buf); err != nil { + t.Errorf("unexpected Write error: %v", err) + break + } + } + if err := c.Close(); err != nil { + t.Errorf("unexpected Close error: %v", err) + } + } + + wg.Add(2) + go pingPonger(c1) + go pingPonger(c2) + + // Start off the chain reaction. + if _, err := c1.Write(make([]byte, 8)); err != nil { + t.Errorf("unexpected c1.Write error: %v", err) + } +} + +// testRacyRead tests that it is safe to mutate the input Read buffer +// immediately after cancelation has occurred. 
+func testRacyRead(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, rand.New(rand.NewSource(0))) + + var wg sync.WaitGroup + defer wg.Wait() + + c1.SetReadDeadline(time.Now().Add(time.Millisecond)) + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + b1 := make([]byte, 1024) + b2 := make([]byte, 1024) + for j := 0; j < 100; j++ { + _, err := c1.Read(b1) + copy(b1, b2) // Mutate b1 to trigger potential race + if err != nil { + checkForTimeoutError(t, err) + c1.SetReadDeadline(time.Now().Add(time.Millisecond)) + } + } + }() + } +} + +// testRacyWrite tests that it is safe to mutate the input Write buffer +// immediately after cancelation has occurred. +func testRacyWrite(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(ioutil.Discard, c2) + + var wg sync.WaitGroup + defer wg.Wait() + + c1.SetWriteDeadline(time.Now().Add(time.Millisecond)) + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + defer wg.Done() + + b1 := make([]byte, 1024) + b2 := make([]byte, 1024) + for j := 0; j < 100; j++ { + _, err := c1.Write(b1) + copy(b1, b2) // Mutate b1 to trigger potential race + if err != nil { + checkForTimeoutError(t, err) + c1.SetWriteDeadline(time.Now().Add(time.Millisecond)) + } + } + }() + } +} + +// testReadTimeout tests that Read timeouts do not affect Write. +func testReadTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(ioutil.Discard, c2) + + c1.SetReadDeadline(aLongTimeAgo) + _, err := c1.Read(make([]byte, 1024)) + checkForTimeoutError(t, err) + if _, err := c1.Write(make([]byte, 1024)); err != nil { + t.Errorf("unexpected Write error: %v", err) + } +} + +// testWriteTimeout tests that Write timeouts do not affect Read. 
+func testWriteTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, rand.New(rand.NewSource(0))) + + c1.SetWriteDeadline(aLongTimeAgo) + _, err := c1.Write(make([]byte, 1024)) + checkForTimeoutError(t, err) + if _, err := c1.Read(make([]byte, 1024)); err != nil { + t.Errorf("unexpected Read error: %v", err) + } +} + +// testPastTimeout tests that a deadline set in the past immediately times out +// Read and Write requests. +func testPastTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, c2) + + testRoundtrip(t, c1) + + c1.SetDeadline(aLongTimeAgo) + n, err := c1.Write(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Write count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + n, err = c1.Read(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Read count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + + testRoundtrip(t, c1) +} + +// testPresentTimeout tests that a past deadline set while there are pending +// Read and Write operations immediately times out those operations. +func testPresentTimeout(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(3) + + deadlineSet := make(chan bool, 1) + go func() { + defer wg.Done() + time.Sleep(100 * time.Millisecond) + deadlineSet <- true + c1.SetReadDeadline(aLongTimeAgo) + c1.SetWriteDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + n, err := c1.Read(make([]byte, 1024)) + if n != 0 { + t.Errorf("unexpected Read count: got %d, want 0", n) + } + checkForTimeoutError(t, err) + if len(deadlineSet) == 0 { + t.Error("Read timed out before deadline is set") + } + }() + go func() { + defer wg.Done() + var err error + for err == nil { + _, err = c1.Write(make([]byte, 1024)) + } + checkForTimeoutError(t, err) + if len(deadlineSet) == 0 { + t.Error("Write timed out before deadline is set") + } + }() +} + +// testFutureTimeout tests that a future deadline will eventually time out +// Read and Write operations. 
+func testFutureTimeout(t *testing.T, c1, c2 net.Conn) { + var wg sync.WaitGroup + wg.Add(2) + + c1.SetDeadline(time.Now().Add(100 * time.Millisecond)) + go func() { + defer wg.Done() + _, err := c1.Read(make([]byte, 1024)) + checkForTimeoutError(t, err) + }() + go func() { + defer wg.Done() + var err error + for err == nil { + _, err = c1.Write(make([]byte, 1024)) + } + checkForTimeoutError(t, err) + }() + wg.Wait() + + go chunkedCopy(c2, c2) + resyncConn(t, c1) + testRoundtrip(t, c1) +} + +// testCloseTimeout tests that calling Close immediately times out pending +// Read and Write operations. +func testCloseTimeout(t *testing.T, c1, c2 net.Conn) { + go chunkedCopy(c2, c2) + + var wg sync.WaitGroup + defer wg.Wait() + wg.Add(3) + + // Test for cancelation upon connection closure. + c1.SetDeadline(neverTimeout) + go func() { + defer wg.Done() + time.Sleep(100 * time.Millisecond) + c1.Close() + }() + go func() { + defer wg.Done() + var err error + buf := make([]byte, 1024) + for err == nil { + _, err = c1.Read(buf) + } + }() + go func() { + defer wg.Done() + var err error + buf := make([]byte, 1024) + for err == nil { + _, err = c1.Write(buf) + } + }() +} + +// testConcurrentMethods tests that the methods of net.Conn can safely +// be called concurrently. +func testConcurrentMethods(t *testing.T, c1, c2 net.Conn) { + if runtime.GOOS == "plan9" { + t.Skip("skipping on plan9; see https://golang.org/issue/20489") + } + go chunkedCopy(c2, c2) + + // The results of the calls may be nonsensical, but this should + // not trigger a race detector warning. 
+ var wg sync.WaitGroup + for i := 0; i < 100; i++ { + wg.Add(7) + go func() { + defer wg.Done() + c1.Read(make([]byte, 1024)) + }() + go func() { + defer wg.Done() + c1.Write(make([]byte, 1024)) + }() + go func() { + defer wg.Done() + c1.SetDeadline(time.Now().Add(10 * time.Millisecond)) + }() + go func() { + defer wg.Done() + c1.SetReadDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + c1.SetWriteDeadline(aLongTimeAgo) + }() + go func() { + defer wg.Done() + c1.LocalAddr() + }() + go func() { + defer wg.Done() + c1.RemoteAddr() + }() + } + wg.Wait() // At worst, the deadline is set 10ms into the future + + resyncConn(t, c1) + testRoundtrip(t, c1) +} + +// checkForTimeoutError checks that the error satisfies the Error interface +// and that Timeout returns true. +func checkForTimeoutError(t *testing.T, err error) { + t.Helper() + if nerr, ok := err.(net.Error); ok { + if !nerr.Timeout() { + if runtime.GOOS == "windows" && runtime.GOARCH == "arm64" && t.Name() == "TestTestConn/TCP/RacyRead" { + t.Logf("ignoring known failure mode on windows/arm64; see https://go.dev/issue/52893") + } else { + t.Errorf("got error: %v, want err.Timeout() = true", nerr) + } + } + } else { + t.Errorf("got %T: %v, want net.Error", err, err) + } +} + +// testRoundtrip writes something into c and reads it back. +// It assumes that everything written into c is echoed back to itself. +func testRoundtrip(t *testing.T, c net.Conn) { + t.Helper() + if err := c.SetDeadline(neverTimeout); err != nil { + t.Errorf("roundtrip SetDeadline error: %v", err) + } + + const s = "Hello, world!" + buf := []byte(s) + if _, err := c.Write(buf); err != nil { + t.Errorf("roundtrip Write error: %v", err) + } + if _, err := io.ReadFull(c, buf); err != nil { + t.Errorf("roundtrip Read error: %v", err) + } + if string(buf) != s { + t.Errorf("roundtrip data mismatch: got %q, want %q", buf, s) + } +} + +// resyncConn resynchronizes the connection into a sane state. 
+// It assumes that everything written into c is echoed back to itself. +// It assumes that 0xff is not currently on the wire or in the read buffer. +func resyncConn(t *testing.T, c net.Conn) { + t.Helper() + c.SetDeadline(neverTimeout) + errCh := make(chan error) + go func() { + _, err := c.Write([]byte{0xff}) + errCh <- err + }() + buf := make([]byte, 1024) + for { + n, err := c.Read(buf) + if n > 0 && bytes.IndexByte(buf[:n], 0xff) == n-1 { + break + } + if err != nil { + t.Errorf("unexpected Read error: %v", err) + break + } + } + if err := <-errCh; err != nil { + t.Errorf("unexpected Write error: %v", err) + } +} + +// chunkedCopy copies from r to w in fixed-width chunks to avoid +// causing a Write that exceeds the maximum packet size for packet-based +// connections like "unixpacket". +// We assume that the maximum packet size is at least 1024. +func chunkedCopy(w io.Writer, r io.Reader) error { + b := make([]byte, 1024) + _, err := io.CopyBuffer(struct{ io.Writer }{w}, struct{ io.Reader }{r}, b) + return err +} diff --git a/vendor/golang.org/x/net/nettest/nettest.go b/vendor/golang.org/x/net/nettest/nettest.go new file mode 100644 index 00000000..3656c3c5 --- /dev/null +++ b/vendor/golang.org/x/net/nettest/nettest.go @@ -0,0 +1,345 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package nettest provides utilities for network testing. 
+package nettest + +import ( + "errors" + "fmt" + "io/ioutil" + "net" + "os" + "os/exec" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +var ( + stackOnce sync.Once + ipv4Enabled bool + canListenTCP4OnLoopback bool + ipv6Enabled bool + canListenTCP6OnLoopback bool + unStrmDgramEnabled bool + rawSocketSess bool + + aLongTimeAgo = time.Unix(233431200, 0) + neverTimeout = time.Time{} + + errNoAvailableInterface = errors.New("no available interface") + errNoAvailableAddress = errors.New("no available address") +) + +func probeStack() { + if _, err := RoutedInterface("ip4", net.FlagUp); err == nil { + ipv4Enabled = true + } + if ln, err := net.Listen("tcp4", "127.0.0.1:0"); err == nil { + ln.Close() + canListenTCP4OnLoopback = true + } + if _, err := RoutedInterface("ip6", net.FlagUp); err == nil { + ipv6Enabled = true + } + if ln, err := net.Listen("tcp6", "[::1]:0"); err == nil { + ln.Close() + canListenTCP6OnLoopback = true + } + rawSocketSess = supportsRawSocket() + switch runtime.GOOS { + case "aix": + // Unix network isn't properly working on AIX 7.2 with + // Technical Level < 2. + out, _ := exec.Command("oslevel", "-s").Output() + if len(out) >= len("7200-XX-ZZ-YYMM") { // AIX 7.2, Tech Level XX, Service Pack ZZ, date YYMM + ver := string(out[:4]) + tl, _ := strconv.Atoi(string(out[5:7])) + unStrmDgramEnabled = ver > "7200" || (ver == "7200" && tl >= 2) + } + default: + unStrmDgramEnabled = true + } +} + +func unixStrmDgramEnabled() bool { + stackOnce.Do(probeStack) + return unStrmDgramEnabled +} + +// SupportsIPv4 reports whether the platform supports IPv4 networking +// functionality. +func SupportsIPv4() bool { + stackOnce.Do(probeStack) + return ipv4Enabled +} + +// SupportsIPv6 reports whether the platform supports IPv6 networking +// functionality. +func SupportsIPv6() bool { + stackOnce.Do(probeStack) + return ipv6Enabled +} + +// SupportsRawSocket reports whether the current session is available +// to use raw sockets. 
+func SupportsRawSocket() bool { + stackOnce.Do(probeStack) + return rawSocketSess +} + +// TestableNetwork reports whether network is testable on the current +// platform configuration. +// +// See func Dial of the standard library for the supported networks. +func TestableNetwork(network string) bool { + ss := strings.Split(network, ":") + switch ss[0] { + case "ip+nopriv": + // This is an internal network name for testing on the + // package net of the standard library. + switch runtime.GOOS { + case "android", "fuchsia", "hurd", "ios", "js", "nacl", "plan9", "wasip1", "windows": + return false + } + case "ip", "ip4", "ip6": + switch runtime.GOOS { + case "fuchsia", "hurd", "js", "nacl", "plan9", "wasip1": + return false + default: + if os.Getuid() != 0 { + return false + } + } + case "unix", "unixgram": + switch runtime.GOOS { + case "android", "fuchsia", "hurd", "ios", "js", "nacl", "plan9", "wasip1", "windows": + return false + case "aix": + return unixStrmDgramEnabled() + } + case "unixpacket": + switch runtime.GOOS { + case "aix", "android", "fuchsia", "hurd", "darwin", "ios", "js", "nacl", "plan9", "wasip1", "windows", "zos": + return false + } + } + switch ss[0] { + case "tcp4", "udp4", "ip4": + return SupportsIPv4() + case "tcp6", "udp6", "ip6": + return SupportsIPv6() + } + return true +} + +// TestableAddress reports whether address of network is testable on +// the current platform configuration. +func TestableAddress(network, address string) bool { + switch ss := strings.Split(network, ":"); ss[0] { + case "unix", "unixgram", "unixpacket": + // Abstract unix domain sockets, a Linux-ism. + if address[0] == '@' && runtime.GOOS != "linux" { + return false + } + } + return true +} + +// NewLocalListener returns a listener which listens to a loopback IP +// address or local file system path. +// +// The provided network must be "tcp", "tcp4", "tcp6", "unix" or +// "unixpacket". 
+func NewLocalListener(network string) (net.Listener, error) { + stackOnce.Do(probeStack) + switch network { + case "tcp": + if canListenTCP4OnLoopback { + if ln, err := net.Listen("tcp4", "127.0.0.1:0"); err == nil { + return ln, nil + } + } + if canListenTCP6OnLoopback { + return net.Listen("tcp6", "[::1]:0") + } + case "tcp4": + if canListenTCP4OnLoopback { + return net.Listen("tcp4", "127.0.0.1:0") + } + case "tcp6": + if canListenTCP6OnLoopback { + return net.Listen("tcp6", "[::1]:0") + } + case "unix", "unixpacket": + path, err := LocalPath() + if err != nil { + return nil, err + } + return net.Listen(network, path) + } + return nil, fmt.Errorf("%s is not supported on %s/%s", network, runtime.GOOS, runtime.GOARCH) +} + +// NewLocalPacketListener returns a packet listener which listens to a +// loopback IP address or local file system path. +// +// The provided network must be "udp", "udp4", "udp6" or "unixgram". +func NewLocalPacketListener(network string) (net.PacketConn, error) { + stackOnce.Do(probeStack) + switch network { + case "udp": + if canListenTCP4OnLoopback { + if c, err := net.ListenPacket("udp4", "127.0.0.1:0"); err == nil { + return c, nil + } + } + if canListenTCP6OnLoopback { + return net.ListenPacket("udp6", "[::1]:0") + } + case "udp4": + if canListenTCP4OnLoopback { + return net.ListenPacket("udp4", "127.0.0.1:0") + } + case "udp6": + if canListenTCP6OnLoopback { + return net.ListenPacket("udp6", "[::1]:0") + } + case "unixgram": + path, err := LocalPath() + if err != nil { + return nil, err + } + return net.ListenPacket(network, path) + } + return nil, fmt.Errorf("%s is not supported on %s/%s", network, runtime.GOOS, runtime.GOARCH) +} + +// LocalPath returns a local path that can be used for Unix-domain +// protocol testing. 
+func LocalPath() (string, error) { + dir := "" + if runtime.GOOS == "darwin" { + dir = "/tmp" + } + f, err := ioutil.TempFile(dir, "go-nettest") + if err != nil { + return "", err + } + path := f.Name() + f.Close() + os.Remove(path) + return path, nil +} + +// MulticastSource returns a unicast IP address on ifi when ifi is an +// IP multicast-capable network interface. +// +// The provided network must be "ip", "ip4" or "ip6". +func MulticastSource(network string, ifi *net.Interface) (net.IP, error) { + switch network { + case "ip", "ip4", "ip6": + default: + return nil, errNoAvailableAddress + } + if ifi == nil || ifi.Flags&net.FlagUp == 0 || ifi.Flags&net.FlagMulticast == 0 { + return nil, errNoAvailableAddress + } + ip, ok := hasRoutableIP(network, ifi) + if !ok { + return nil, errNoAvailableAddress + } + return ip, nil +} + +// LoopbackInterface returns an available logical network interface +// for loopback test. +func LoopbackInterface() (*net.Interface, error) { + ift, err := net.Interfaces() + if err != nil { + return nil, errNoAvailableInterface + } + for _, ifi := range ift { + if ifi.Flags&net.FlagLoopback != 0 && ifi.Flags&net.FlagUp != 0 { + return &ifi, nil + } + } + return nil, errNoAvailableInterface +} + +// RoutedInterface returns a network interface that can route IP +// traffic and satisfies flags. +// +// The provided network must be "ip", "ip4" or "ip6". 
+func RoutedInterface(network string, flags net.Flags) (*net.Interface, error) { + switch network { + case "ip", "ip4", "ip6": + default: + return nil, errNoAvailableInterface + } + ift, err := net.Interfaces() + if err != nil { + return nil, errNoAvailableInterface + } + for _, ifi := range ift { + if ifi.Flags&flags != flags { + continue + } + if _, ok := hasRoutableIP(network, &ifi); !ok { + continue + } + return &ifi, nil + } + return nil, errNoAvailableInterface +} + +func hasRoutableIP(network string, ifi *net.Interface) (net.IP, bool) { + ifat, err := ifi.Addrs() + if err != nil { + return nil, false + } + for _, ifa := range ifat { + switch ifa := ifa.(type) { + case *net.IPAddr: + if ip, ok := routableIP(network, ifa.IP); ok { + return ip, true + } + case *net.IPNet: + if ip, ok := routableIP(network, ifa.IP); ok { + return ip, true + } + } + } + return nil, false +} + +func routableIP(network string, ip net.IP) (net.IP, bool) { + if !ip.IsLoopback() && !ip.IsLinkLocalUnicast() && !ip.IsGlobalUnicast() { + return nil, false + } + switch network { + case "ip4": + if ip := ip.To4(); ip != nil { + return ip, true + } + case "ip6": + if ip.IsLoopback() { // addressing scope of the loopback address depends on each implementation + return nil, false + } + if ip := ip.To16(); ip != nil && ip.To4() == nil { + return ip, true + } + default: + if ip := ip.To4(); ip != nil { + return ip, true + } + if ip := ip.To16(); ip != nil { + return ip, true + } + } + return nil, false +} diff --git a/vendor/golang.org/x/net/nettest/nettest_stub.go b/vendor/golang.org/x/net/nettest/nettest_stub.go new file mode 100644 index 00000000..1725b6aa --- /dev/null +++ b/vendor/golang.org/x/net/nettest/nettest_stub.go @@ -0,0 +1,11 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos + +package nettest + +func supportsRawSocket() bool { + return false +} diff --git a/vendor/golang.org/x/net/nettest/nettest_unix.go b/vendor/golang.org/x/net/nettest/nettest_unix.go new file mode 100644 index 00000000..9ba269d0 --- /dev/null +++ b/vendor/golang.org/x/net/nettest/nettest_unix.go @@ -0,0 +1,21 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos + +package nettest + +import "syscall" + +func supportsRawSocket() bool { + for _, af := range []int{syscall.AF_INET, syscall.AF_INET6} { + s, err := syscall.Socket(af, syscall.SOCK_RAW, 0) + if err != nil { + continue + } + syscall.Close(s) + return true + } + return false +} diff --git a/vendor/golang.org/x/net/nettest/nettest_windows.go b/vendor/golang.org/x/net/nettest/nettest_windows.go new file mode 100644 index 00000000..4939964d --- /dev/null +++ b/vendor/golang.org/x/net/nettest/nettest_windows.go @@ -0,0 +1,26 @@ +// Copyright 2019 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package nettest + +import "syscall" + +func supportsRawSocket() bool { + // From http://msdn.microsoft.com/en-us/library/windows/desktop/ms740548.aspx: + // Note: To use a socket of type SOCK_RAW requires administrative privileges. + // Users running Winsock applications that use raw sockets must be a member of + // the Administrators group on the local computer, otherwise raw socket calls + // will fail with an error code of WSAEACCES. On Windows Vista and later, access + // for raw sockets is enforced at socket creation. 
In earlier versions of Windows, + // access for raw sockets is enforced during other socket operations. + for _, af := range []int{syscall.AF_INET, syscall.AF_INET6} { + s, err := syscall.Socket(af, syscall.SOCK_RAW, 0) + if err != nil { + continue + } + syscall.Closesocket(s) + return true + } + return false +} diff --git a/vendor/google.golang.org/grpc/clientconn.go b/vendor/google.golang.org/grpc/clientconn.go index e3eb44d5..c7f26071 100644 --- a/vendor/google.golang.org/grpc/clientconn.go +++ b/vendor/google.golang.org/grpc/clientconn.go @@ -196,8 +196,6 @@ func NewClient(target string, opts ...DialOption) (conn *ClientConn, err error) } // Dial calls DialContext(context.Background(), target, opts...). -// -// Deprecated: use NewClient instead. Will be supported throughout 1.x. func Dial(target string, opts ...DialOption) (*ClientConn, error) { return DialContext(context.Background(), target, opts...) } @@ -211,8 +209,6 @@ func Dial(target string, opts ...DialOption) (*ClientConn, error) { // "passthrough" for backward compatibility. This distinction should not matter // to most users, but could matter to legacy users that specify a custom dialer // and expect it to receive the target string directly. -// -// Deprecated: use NewClient instead. Will be supported throughout 1.x. func DialContext(ctx context.Context, target string, opts ...DialOption) (conn *ClientConn, err error) { // At the end of this method, we kick the channel out of idle, rather than // waiting for the first rpc. @@ -837,7 +833,7 @@ func (cc *ClientConn) newAddrConnLocked(addrs []resolver.Address, opts balancer. 
addrs: copyAddressesWithoutBalancerAttributes(addrs), scopts: opts, dopts: cc.dopts, - channelz: channelz.RegisterSubChannel(cc.channelz.ID, ""), + channelz: channelz.RegisterSubChannel(cc.channelz, ""), resetBackoff: make(chan struct{}), stateChan: make(chan struct{}), } diff --git a/vendor/google.golang.org/grpc/internal/channelz/funcs.go b/vendor/google.golang.org/grpc/internal/channelz/funcs.go index f461e9bc..03e24e15 100644 --- a/vendor/google.golang.org/grpc/internal/channelz/funcs.go +++ b/vendor/google.golang.org/grpc/internal/channelz/funcs.go @@ -143,20 +143,21 @@ func RegisterChannel(parent *Channel, target string) *Channel { // Returns a unique channelz identifier assigned to this subChannel. // // If channelz is not turned ON, the channelz database is not mutated. -func RegisterSubChannel(pid int64, ref string) *SubChannel { +func RegisterSubChannel(parent *Channel, ref string) *SubChannel { id := IDGen.genID() - if !IsOn() { - return &SubChannel{ID: id} + sc := &SubChannel{ + ID: id, + RefName: ref, + parent: parent, } - sc := &SubChannel{ - RefName: ref, - ID: id, - sockets: make(map[int64]string), - parent: db.getChannel(pid), - trace: &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())}, + if !IsOn() { + return sc } - db.addSubChannel(id, sc, pid) + + sc.sockets = make(map[int64]string) + sc.trace = &ChannelTrace{CreationTime: time.Now(), Events: make([]*traceEvent, 0, getMaxTraceEntry())} + db.addSubChannel(id, sc, parent.ID) return sc } diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index eaf5dbce..2556f758 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. 
-const Version = "1.63.0" +const Version = "1.63.2" diff --git a/vendor/modules.txt b/vendor/modules.txt index ee0d8c2f..8f1a21f0 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -15,8 +15,8 @@ github.com/cespare/xxhash/v2 ## explicit; go 1.13 github.com/coredns/caddy github.com/coredns/caddy/caddyfile -# github.com/coredns/coredns v1.10.0 -## explicit; go 1.17 +# github.com/coredns/coredns v1.11.3 +## explicit; go 1.21 github.com/coredns/coredns/core/dnsserver github.com/coredns/coredns/coremain github.com/coredns/coredns/pb @@ -35,7 +35,6 @@ github.com/coredns/coredns/plugin/pkg/doh github.com/coredns/coredns/plugin/pkg/edns github.com/coredns/coredns/plugin/pkg/fuzz github.com/coredns/coredns/plugin/pkg/log -github.com/coredns/coredns/plugin/pkg/nonwriter github.com/coredns/coredns/plugin/pkg/parse github.com/coredns/coredns/plugin/pkg/rcode github.com/coredns/coredns/plugin/pkg/response @@ -54,7 +53,7 @@ github.com/coreos/go-systemd/v22/daemon # github.com/cpuguy83/go-md2man/v2 v2.0.0 ## explicit; go 1.12 github.com/cpuguy83/go-md2man/v2/md2man -# github.com/davecgh/go-spew v1.1.1 +# github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc ## explicit github.com/davecgh/go-spew/spew # github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 @@ -106,8 +105,8 @@ github.com/go-logr/stdr # github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 ## explicit; go 1.13 github.com/go-task/slim-sprig -# github.com/gobwas/httphead v0.0.0-20200921212729-da3d93bc3c58 -## explicit +# github.com/gobwas/httphead v0.1.0 +## explicit; go 1.15 github.com/gobwas/httphead # github.com/gobwas/pool v0.2.1 ## explicit @@ -115,13 +114,10 @@ github.com/gobwas/pool github.com/gobwas/pool/internal/pmath github.com/gobwas/pool/pbufio github.com/gobwas/pool/pbytes -# github.com/gobwas/ws v1.0.4 -## explicit +# github.com/gobwas/ws v1.2.1 +## explicit; go 1.15 github.com/gobwas/ws github.com/gobwas/ws/wsutil -# github.com/golang-collections/collections 
v0.0.0-20130729185459-604e922904d3 -## explicit -github.com/golang-collections/collections/queue # github.com/golang/protobuf v1.5.4 ## explicit; go 1.17 github.com/golang/protobuf/proto @@ -129,8 +125,8 @@ github.com/golang/protobuf/proto ## explicit; go 1.12 github.com/google/gopacket github.com/google/gopacket/layers -# github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 -## explicit; go 1.14 +# github.com/google/pprof v0.0.0-20230817174616-7a8ec2ada47b +## explicit; go 1.19 github.com/google/pprof/profile # github.com/google/uuid v1.6.0 ## explicit @@ -162,11 +158,11 @@ github.com/mattn/go-colorable # github.com/mattn/go-isatty v0.0.16 ## explicit; go 1.15 github.com/mattn/go-isatty -# github.com/matttproud/golang_protobuf_extensions v1.0.1 -## explicit +# github.com/matttproud/golang_protobuf_extensions v1.0.4 +## explicit; go 1.9 github.com/matttproud/golang_protobuf_extensions/pbutil -# github.com/miekg/dns v1.1.50 -## explicit; go 1.14 +# github.com/miekg/dns v1.1.58 +## explicit; go 1.19 github.com/miekg/dns # github.com/mitchellh/go-homedir v1.1.0 ## explicit @@ -177,7 +173,7 @@ github.com/modern-go/concurrent # github.com/modern-go/reflect2 v1.0.2 ## explicit; go 1.12 github.com/modern-go/reflect2 -# github.com/onsi/ginkgo/v2 v2.9.5 +# github.com/onsi/ginkgo/v2 v2.13.0 ## explicit; go 1.18 github.com/onsi/ginkgo/v2/config github.com/onsi/ginkgo/v2/formatter @@ -212,10 +208,10 @@ github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal github.com/prometheus/client_golang/prometheus/promauto github.com/prometheus/client_golang/prometheus/promhttp -# github.com/prometheus/client_model v0.5.0 +# github.com/prometheus/client_model v0.6.0 ## explicit; go 1.19 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.48.0 +# github.com/prometheus/common v0.53.0 ## explicit; go 1.20 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg @@ -353,10 
+349,11 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/ipv4 golang.org/x/net/ipv6 +golang.org/x/net/nettest golang.org/x/net/proxy golang.org/x/net/trace golang.org/x/net/websocket -# golang.org/x/oauth2 v0.17.0 +# golang.org/x/oauth2 v0.18.0 ## explicit; go 1.18 golang.org/x/oauth2 golang.org/x/oauth2/internal @@ -422,13 +419,13 @@ google.golang.org/appengine/internal/log google.golang.org/appengine/internal/remote_api google.golang.org/appengine/internal/urlfetch google.golang.org/appengine/urlfetch -# google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de +# google.golang.org/genproto/googleapis/api v0.0.0-20240311132316-a219d84964c2 ## explicit; go 1.19 google.golang.org/genproto/googleapis/api/httpbody -# google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de +# google.golang.org/genproto/googleapis/rpc v0.0.0-20240318140521-94a12d6c2237 ## explicit; go 1.19 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.63.0 +# google.golang.org/grpc v1.63.2 ## explicit; go 1.19 google.golang.org/grpc google.golang.org/grpc/attributes