Merge branch 'master' into patch-1

João Oliveirinha 2022-10-12 11:53:07 +01:00 committed by GitHub
commit 3edc2e472c
1233 changed files with 134702 additions and 55385 deletions


@ -1,8 +1,12 @@
images:
- name: cloudflared
dockerfile: Dockerfile
dockerfile: Dockerfile.$ARCH
context: .
version_file: versions
registries:
- name: docker.io/cloudflare
user: env:DOCKER_USER
password: env:DOCKER_PASSWORD
architectures:
- amd64
- arm64


@ -1,9 +1,8 @@
---
name: "Bug report \U0001F41B"
name: "\U0001F41B Bug report"
about: Create a report to help us improve cloudflared
title: ''
labels: awaiting reply, bug
assignees: ''
title: "\U0001F41B"
labels: 'Priority: Normal, Type: Bug'
---
@ -16,6 +15,10 @@ Steps to reproduce the behavior:
2. Run '....'
3. See error
If it's an issue with Cloudflare Tunnel:
4. Tunnel ID :
5. cloudflared config:
**Expected behavior**
A clear and concise description of what you expected to happen.


@ -0,0 +1,16 @@
---
name: "\U0001F4DD Documentation"
about: Request new or updated documentation for cloudflared
title: "\U0001F4DD"
labels: 'Priority: Normal, Type: Documentation'
---
**Available Documentation**
A link to the documentation that is available today and the areas which could be improved.
**Suggested Documentation**
A clear and concise description of the documentation, tutorial, or guide that should be added.
**Additional context**
Add any other context or screenshots about the documentation request here.


@ -1,9 +1,8 @@
---
name: "Feature request \U0001F4A1"
name: "\U0001F4A1 Feature request"
about: Suggest a feature or enhancement for cloudflared
title: ''
labels: awaiting reply, feature-request
assignees: ''
title: "\U0001F4A1"
labels: 'Priority: Normal, Type: Feature Request'
---


@ -4,7 +4,7 @@ jobs:
check:
strategy:
matrix:
go-version: [1.17.x]
go-version: [1.19.x]
os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }}
steps:
@ -12,8 +12,6 @@ jobs:
uses: actions/setup-go@v2
with:
go-version: ${{ matrix.go-version }}
- name: Install go-sumtype
run: go get github.com/sudarshan-reddy/go-sumtype
- name: Checkout code
uses: actions/checkout@v2
- name: Test

.gitignore (vendored): 1 changed line

@ -10,6 +10,7 @@ cscope.*
/cloudflared.exe
/cloudflared.msi
/cloudflared-x86-64*
/cloudflared.1
/packaging
.DS_Store
*-session.log

.teamcity/package-windows.sh (new executable file, vendored): 17 lines

@ -0,0 +1,17 @@
VERSION=$(git describe --tags --always --match "[0-9][0-9][0-9][0-9].*.*")
echo $VERSION
export TARGET_OS=windows
# This controls the directory the built artifacts go into
export ARTIFACT_DIR=built_artifacts/
mkdir -p $ARTIFACT_DIR
windowsArchs=("amd64" "386")
for arch in ${windowsArchs[@]}; do
export TARGET_ARCH=$arch
# Copy exe into final directory
cp ./artifacts/cloudflared-windows-$arch.exe $ARTIFACT_DIR/cloudflared-windows-$arch.exe
cp ./artifacts/cloudflared-windows-$arch.exe ./cloudflared.exe
make cloudflared-msi
# Copy msi into final directory
mv cloudflared-$VERSION-$arch.msi $ARTIFACT_DIR/cloudflared-windows-$arch.msi
done

.teamcity/update-homebrew-core.sh (new executable file, vendored): 26 lines

@ -0,0 +1,26 @@
#!/bin/bash
set -euo pipefail
if ! VERSION="$(git describe --tags --exact-match 2>/dev/null)" ; then
echo "Skipping public release for an untagged commit."
echo "##teamcity[buildStatus status='SUCCESS' text='Skipped due to lack of tag']"
exit 0
fi
if [[ "${HOMEBREW_GITHUB_API_TOKEN:-}" == "" ]] ; then
echo "Missing GITHUB_API_TOKEN"
exit 1
fi
# "install" Homebrew
git clone https://github.com/Homebrew/brew tmp/homebrew
eval "$(tmp/homebrew/bin/brew shellenv)"
brew update --force --quiet
chmod -R go-w "$(brew --prefix)/share/zsh"
git config --global user.name "cloudflare-warp-bot"
git config --global user.email "warp-bot@cloudflare.com"
# bump formula pr
brew bump-formula-pr cloudflared --version="$VERSION" --no-browse --no-audit


@ -1,4 +1,75 @@
**Experimental**: This is a new format for release notes. The format and availability are subject to change.
## 2022.9.0
### New Features
- cloudflared now rejects ingress rules that specify an invalid HTTP status code for the `http_status` service.
## 2022.8.1
### New Features
- cloudflared now remembers if it connected to a certain protocol successfully. If it did, it does not fall back to a lower
protocol on connection failures.
## 2022.7.1
### New Features
- It is now possible to connect cloudflared tunnel to Cloudflare Global Network with IPv6. See `cloudflared tunnel --help` and look for `edge-ip-version` for more information. For now, the default behavior is to still connect with IPv4 only.
### Bug Fixes
- Several bug fixes related to the QUIC transport (used between cloudflared tunnel and the Cloudflare Global Network). Updating to this version is highly recommended.
## 2022.4.0
### Bug Fixes
- `cloudflared tunnel run` no longer logs the Tunnel token or JSON credentials in clear text, as those are the secrets
that allow running the Tunnel.
## 2022.3.4
### New Features
- It is now possible to retrieve the credentials that allow running a Tunnel in case you forgot or lost them. This is
done with `cloudflared tunnel token --cred-file /path/to/file.json TUNNEL`. This feature only works for
Tunnels created with cloudflared version 2022.3.0 or later.
### Bug Fixes
- `cloudflared service install` now starts the underlying agent service on the Linux operating system (matching the
behaviour on Windows and macOS).
## 2022.3.3
### Bug Fixes
- `cloudflared service install` now starts the underlying agent service on the Windows operating system (matching the
behaviour on macOS).
## 2022.3.1
### Bug Fixes
- Various fixes to the reliability of `quic` protocol, including an edge case that could lead to cloudflared crashing.
## 2022.3.0
### New Features
- It is now possible to configure Ingress Rules to point to an origin served over a unix socket with either HTTP or HTTPS.
If the origin starts with `unix:/`, HTTP is assumed (existing behavior); if it starts with `unix+tls:/`, HTTPS is used.
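For illustration, an ingress section of a cloudflared `config.yml` using both schemes might look like the sketch below; the hostnames and socket paths are made up, and only the `unix:/` and `unix+tls:/` prefixes come from this change.

```yaml
ingress:
  # HTTP origin behind a unix socket (existing behavior)
  - hostname: app.example.com
    service: unix:/var/run/app/http.sock
  # HTTPS origin behind a unix socket (new in 2022.3.0)
  - hostname: secure.example.com
    service: unix+tls:/var/run/app/https.sock
  # catch-all rule
  - service: http_status:404
```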
## 2022.2.1
### New Features
- This project now has a new LICENSE that is better suited to open source use.
### Bug Fixes
- Various fixes to the reliability of `quic` protocol.
## 2022.1.3
### New Features
- New `cloudflared tunnel vnet` commands allow private routing to be virtualized. This means that the same CIDR
can now be used to point to two different Tunnels with the `cloudflared tunnel route ip` command. More information will be
made available on blog.cloudflare.com and developers.cloudflare.com/cloudflare-one once the feature is globally available.
### Bug Fixes
- Correctly handle proxying UDP datagrams with no payload.
- Bug fix for origins that use Server-Sent Events (SSE).
## 2022.1.0
### Improvements
- If a specific `protocol` property is defined (e.g. `quic`), cloudflared no longer falls back to an older protocol
(such as `http2`) in the face of connectivity errors. This is important because some features are only supported by a specific
protocol (e.g. UDP proxying only works with `quic`). Hence, if a user chooses a protocol, cloudflared now adheres to it.
### Bug Fixes
- Stopping cloudflared running with `quic` protocol now respects graceful shutdown.
## 2021.12.2
### Bug Fixes


@ -1,12 +1,12 @@
# use a builder image for building cloudflared
ARG TARGET_GOOS
ARG TARGET_GOARCH
FROM golang:1.17.1 as builder
FROM golang:1.19 as builder
ENV GO111MODULE=on \
CGO_ENABLED=0 \
TARGET_GOOS=${TARGET_GOOS} \
TARGET_GOARCH=${TARGET_GOARCH}
WORKDIR /go/src/github.com/cloudflare/cloudflared/
# copy our sources into the builder image
@ -16,7 +16,9 @@ COPY . .
RUN make cloudflared
# use a distroless base image with glibc
FROM gcr.io/distroless/base-debian10:nonroot
FROM gcr.io/distroless/base-debian11:nonroot
LABEL org.opencontainers.image.source="https://github.com/cloudflare/cloudflared"
# copy our compiled binary
COPY --from=builder --chown=nonroot /go/src/github.com/cloudflare/cloudflared/cloudflared /usr/local/bin/

Dockerfile.amd64 (new file): 27 lines

@ -0,0 +1,27 @@
# use a builder image for building cloudflared
FROM golang:1.19 as builder
ENV GO111MODULE=on \
CGO_ENABLED=0
WORKDIR /go/src/github.com/cloudflare/cloudflared/
# copy our sources into the builder image
COPY . .
# compile cloudflared
RUN GOOS=linux GOARCH=amd64 make cloudflared
# use a distroless base image with glibc
FROM gcr.io/distroless/base-debian11:nonroot
LABEL org.opencontainers.image.source="https://github.com/cloudflare/cloudflared"
# copy our compiled binary
COPY --from=builder --chown=nonroot /go/src/github.com/cloudflare/cloudflared/cloudflared /usr/local/bin/
# run as non-privileged user
USER nonroot
# command / entrypoint of container
ENTRYPOINT ["cloudflared", "--no-autoupdate"]
CMD ["version"]

Dockerfile.arm64 (new file): 27 lines

@ -0,0 +1,27 @@
# use a builder image for building cloudflared
FROM golang:1.19 as builder
ENV GO111MODULE=on \
CGO_ENABLED=0
WORKDIR /go/src/github.com/cloudflare/cloudflared/
# copy our sources into the builder image
COPY . .
# compile cloudflared
RUN GOOS=linux GOARCH=arm64 make cloudflared
# use a distroless base image with glibc
FROM gcr.io/distroless/base-debian11:nonroot-arm64
LABEL org.opencontainers.image.source="https://github.com/cloudflare/cloudflared"
# copy our compiled binary
COPY --from=builder --chown=nonroot /go/src/github.com/cloudflare/cloudflared/cloudflared /usr/local/bin/
# run as non-privileged user
USER nonroot
# command / entrypoint of container
ENTRYPOINT ["cloudflared", "--no-autoupdate"]
CMD ["version"]

LICENSE: 347 changed lines

@ -1,211 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/

TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

1. Definitions.

"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.

"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.

"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.

"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:

(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.

You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

## Runtime Library Exception to the Apache 2.0 License: ##

As an exception, if you use this Software to compile your source code and
portions of this Software are embedded into the binary product as a result,
you may redistribute such product without providing attribution as would
otherwise be required by Sections 4(a), 4(b) and 4(d) of the License.


@ -23,6 +23,9 @@ endif
DATE := $(shell date -u '+%Y-%m-%d-%H%M UTC')
VERSION_FLAGS := -X "main.Version=$(VERSION)" -X "main.BuildTime=$(DATE)"
ifdef PACKAGE_MANAGER
VERSION_FLAGS := $(VERSION_FLAGS) -X "github.com/cloudflare/cloudflared/cmd/cloudflared/updater.BuiltForPackageManager=$(PACKAGE_MANAGER)"
endif
LINK_FLAGS :=
ifeq ($(FIPS), true)
@ -37,10 +40,15 @@ ifneq ($(GO_BUILD_TAGS),)
GO_BUILD_TAGS := -tags "$(GO_BUILD_TAGS)"
endif
IMPORT_PATH := github.com/cloudflare/cloudflared
PACKAGE_DIR := $(CURDIR)/packaging
INSTALL_BINDIR := /usr/bin/
MAN_DIR := /usr/share/man/man1/
ifeq ($(debug), 1)
GO_BUILD_TAGS += -gcflags="all=-N -l"
endif
IMPORT_PATH := github.com/cloudflare/cloudflared
PACKAGE_DIR := $(CURDIR)/packaging
PREFIX := /usr
INSTALL_BINDIR := $(PREFIX)/bin/
INSTALL_MANDIR := $(PREFIX)/share/man/man1/
LOCAL_ARCH ?= $(shell uname -m)
ifneq ($(GOARCH),)
@ -59,6 +67,8 @@ else ifeq ($(LOCAL_ARCH),arm64)
TARGET_ARCH ?= arm64
else ifeq ($(shell echo $(LOCAL_ARCH) | head -c 4),armv)
TARGET_ARCH ?= arm
else ifeq ($(LOCAL_ARCH),s390x)
TARGET_ARCH ?= s390x
else
$(error This system's architecture $(LOCAL_ARCH) isn't supported)
endif
@ -88,6 +98,16 @@ else
TARGET_PUBLIC_REPO ?= $(FLAVOR)
endif
ifneq ($(TARGET_ARM), )
ARM_COMMAND := GOARM=$(TARGET_ARM)
endif
ifeq ($(TARGET_ARM), 7)
PACKAGE_ARCH := armhf
else
PACKAGE_ARCH := $(TARGET_ARCH)
endif
.PHONY: all
all: cloudflared test
@ -101,7 +121,7 @@ ifeq ($(FIPS), true)
$(info Building cloudflared with go-fips)
cp -f fips/fips.go.linux-amd64 cmd/cloudflared/fips.go
endif
GOOS=$(TARGET_OS) GOARCH=$(TARGET_ARCH) go build -v -mod=vendor $(GO_BUILD_TAGS) $(LDFLAGS) $(IMPORT_PATH)/cmd/cloudflared
GOOS=$(TARGET_OS) GOARCH=$(TARGET_ARCH) $(ARM_COMMAND) go build -v -mod=vendor $(GO_BUILD_TAGS) $(LDFLAGS) $(IMPORT_PATH)/cmd/cloudflared
ifeq ($(FIPS), true)
rm -f cmd/cloudflared/fips.go
./check-fips.sh cloudflared
@ -111,6 +131,10 @@ endif
container:
docker build --build-arg=TARGET_ARCH=$(TARGET_ARCH) --build-arg=TARGET_OS=$(TARGET_OS) -t cloudflare/cloudflared-$(TARGET_OS)-$(TARGET_ARCH):"$(VERSION)" .
.PHONY: generate-docker-version
generate-docker-version:
echo latest $(VERSION) > versions
.PHONY: test
test: vet
ifndef CI
@ -125,52 +149,44 @@ endif
test-ssh-server:
docker-compose -f ssh_server_tests/docker-compose.yml up
define publish_package
chmod 664 $(BINARY_NAME)*.$(1); \
for HOST in $(CF_PKG_HOSTS); do \
ssh-keyscan -t ecdsa $$HOST >> ~/.ssh/known_hosts; \
scp -p -4 $(BINARY_NAME)*.$(1) cfsync@$$HOST:/state/cf-pkg/staging/$(2)/$(TARGET_PUBLIC_REPO)/$(BINARY_NAME)/; \
done
endef
cloudflared.1: cloudflared_man_template
sed -e 's/\$${VERSION}/$(VERSION)/; s/\$${DATE}/$(DATE)/' cloudflared_man_template > cloudflared.1
.PHONY: publish-deb
publish-deb: cloudflared-deb
$(call publish_package,deb,apt)
.PHONY: publish-rpm
publish-rpm: cloudflared-rpm
$(call publish_package,rpm,yum)
install: cloudflared cloudflared.1
mkdir -p $(DESTDIR)$(INSTALL_BINDIR) $(DESTDIR)$(INSTALL_MANDIR)
install -m755 cloudflared $(DESTDIR)$(INSTALL_BINDIR)/cloudflared
install -m644 cloudflared.1 $(DESTDIR)$(INSTALL_MANDIR)/cloudflared.1
# When we build packages, the package name will be FIPS-aware.
# But we keep the binary installed by it to be named "cloudflared" regardless.
define build_package
mkdir -p $(PACKAGE_DIR)
cp cloudflared $(PACKAGE_DIR)/cloudflared
cat cloudflared_man_template | sed -e 's/\$${VERSION}/$(VERSION)/; s/\$${DATE}/$(DATE)/' > $(PACKAGE_DIR)/cloudflared.1
cp cloudflared.1 $(PACKAGE_DIR)/cloudflared.1
fakeroot fpm -C $(PACKAGE_DIR) -s dir -t $(1) \
--description 'Cloudflare Tunnel daemon' \
--vendor 'Cloudflare' \
--license 'Cloudflare Service Agreement' \
--license 'Apache License Version 2.0' \
--url 'https://github.com/cloudflare/cloudflared' \
-m 'Cloudflare <support@cloudflare.com>' \
-a $(TARGET_ARCH) -v $(VERSION) -n $(DEB_PACKAGE_NAME) $(NIGHTLY_FLAGS) --after-install postinst.sh --after-remove postrm.sh \
cloudflared=$(INSTALL_BINDIR) cloudflared.1=$(MAN_DIR)
-a $(PACKAGE_ARCH) -v $(VERSION) -n $(DEB_PACKAGE_NAME) $(NIGHTLY_FLAGS) --after-install postinst.sh --after-remove postrm.sh \
cloudflared=$(INSTALL_BINDIR) cloudflared.1=$(INSTALL_MANDIR)
endef
.PHONY: cloudflared-deb
cloudflared-deb: cloudflared
cloudflared-deb: cloudflared cloudflared.1
$(call build_package,deb)
.PHONY: cloudflared-rpm
cloudflared-rpm: cloudflared
cloudflared-rpm: cloudflared cloudflared.1
$(call build_package,rpm)
.PHONY: cloudflared-pkg
cloudflared-pkg: cloudflared
cloudflared-pkg: cloudflared cloudflared.1
$(call build_package,osxpkg)
.PHONY: cloudflared-msi
cloudflared-msi: cloudflared
cloudflared-msi:
wixl --define Version=$(VERSION) --define Path=$(EXECUTABLE_PATH) --output cloudflared-$(VERSION)-$(TARGET_ARCH).msi cloudflared.wxs
.PHONY: cloudflared-darwin-amd64.tgz
@ -247,6 +263,10 @@ github-release: cloudflared
github-release-built-pkgs:
python3 github_release.py --path $(PWD)/built_artifacts --release-version $(VERSION)
.PHONY: release-pkgs-linux
release-pkgs-linux:
python3 ./release_pkgs.py
.PHONY: github-message
github-message:
python3 github_message.py --release-version $(VERSION)
@ -256,10 +276,17 @@ github-mac-upload:
python3 github_release.py --path artifacts/cloudflared-darwin-amd64.tgz --release-version $(VERSION) --name cloudflared-darwin-amd64.tgz
python3 github_release.py --path artifacts/cloudflared-amd64.pkg --release-version $(VERSION) --name cloudflared-amd64.pkg
.PHONY: github-windows-upload
github-windows-upload:
python3 github_release.py --path built_artifacts/cloudflared-windows-amd64.exe --release-version $(VERSION) --name cloudflared-windows-amd64.exe
python3 github_release.py --path built_artifacts/cloudflared-windows-amd64.msi --release-version $(VERSION) --name cloudflared-windows-amd64.msi
python3 github_release.py --path built_artifacts/cloudflared-windows-386.exe --release-version $(VERSION) --name cloudflared-windows-386.exe
python3 github_release.py --path built_artifacts/cloudflared-windows-386.msi --release-version $(VERSION) --name cloudflared-windows-386.msi
.PHONY: tunnelrpc-deps
tunnelrpc-deps:
which capnp # https://capnproto.org/install.html
which capnpc-go # go get zombiezen.com/go/capnproto2/capnpc-go
which capnpc-go # go install zombiezen.com/go/capnproto2/capnpc-go@latest
capnp compile -ogo tunnelrpc/tunnelrpc.capnp
.PHONY: quic-deps
@ -270,13 +297,8 @@ quic-deps:
.PHONY: vet
vet:
go vet -mod=vendor ./...
# go get github.com/sudarshan-reddy/go-sumtype (don't do this in build directory or this will cause vendor issues)
# Note: If you have github.com/BurntSushi/go-sumtype then you might have to use the repo above instead
# for now because it uses an older version of golang.org/x/tools.
which go-sumtype
go-sumtype $$(go list -mod=vendor ./...)
go vet -v -mod=vendor ./...
.PHONY: goimports
goimports:
for d in $$(go list -mod=readonly -f '{{.Dir}}' -a ./... | fgrep -v tunnelrpc) ; do goimports -format-only -local github.com/cloudflare/cloudflared -w $$d ; done


@ -25,12 +25,13 @@ routing), but for legacy reasons this requirement is still necessary:
## Installing `cloudflared`
Downloads are available as standalone binaries, a Docker image, and Debian, RPM, and Homebrew packages. You can also find releases here on the `cloudflared` GitHub repository.
Downloads are available as standalone binaries, a Docker image, and Debian, RPM, and Homebrew packages. You can also find releases [here](https://github.com/cloudflare/cloudflared/releases) on the `cloudflared` GitHub repository.
* You can [install on macOS](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/installation#macos) via Homebrew or by downloading the [latest Darwin amd64 release](https://github.com/cloudflare/cloudflared/releases)
* Binaries, Debian, and RPM packages for Linux [can be found here](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/installation#linux)
* A Docker image of `cloudflared` is [available on DockerHub](https://hub.docker.com/r/cloudflare/cloudflared)
* You can install on Windows machines with the [steps here](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/installation#windows)
* Build from source with the [instructions here](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/installation#build-from-source)
User documentation for Cloudflare Tunnel can be found at https://developers.cloudflare.com/cloudflare-one/connections/connect-apps
@ -43,7 +44,7 @@ Once installed, you can authenticate `cloudflared` into your Cloudflare account
* Route traffic to that Tunnel:
* Via public [DNS records in Cloudflare](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/routing-to-tunnel/dns)
* Or via a public hostname guided by a [Cloudflare Load Balancer](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/routing-to-tunnel/lb)
* Or from [WARP client private traffic](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/configuration/private-networks)
* Or from [WARP client private traffic](https://developers.cloudflare.com/cloudflare-one/connections/connect-networks/private-net/)
## TryCloudflare


@ -1,3 +1,256 @@
2022.10.0
- 2022-09-30 TUN-6755: Remove unused publish functions
- 2022-09-30 TUN-6813: Only proxy ICMP packets when warp-routing is enabled
- 2022-09-29 TUN-6811: Ping group range should be parsed as int32
- 2022-09-29 TUN-6812: Drop IP packets if ICMP proxy is not initialized
- 2022-09-28 TUN-6716: Document limitation of Windows ICMP proxy
- 2022-09-28 TUN-6810: Add component test for post-quantum
- 2022-09-27 TUN-6715: Provide suggestion to add cloudflared to ping_group_range if it failed to open ICMP socket
- 2022-09-22 TUN-6792: Fix brew core release by not auditing the formula
- 2022-09-22 TUN-6774: Validate OriginRequest.Access to add Ingress.Middleware
- 2022-09-22 TUN-6775: Add middleware.Handler verification to ProxyHTTP
- 2022-09-22 TUN-6791: Calculate ICMPv6 checksum
- 2022-09-22 TUN-6801: Add punycode alternatives for ingress rules
- 2022-09-21 TUN-6772: Add a JWT Validator as an ingress verifier
- 2022-09-21 TUN-6772: Add a JWT Validator as an ingress verifier
- 2022-09-21 TUN-6774: Validate OriginRequest.Access to add Ingress.Middleware
- 2022-09-21 TUN-6772: Add a JWT Validator as an ingress verifier
- 2022-09-20 TUN-6741: ICMP proxy tries to listen on specific IPv4 & IPv6 when possible
2022.9.1
- 2022-09-20 TUN-6777: Fix race condition in TestFunnelIdleTimeout
- 2022-09-20 TUN-6595: Enable datagramv2 and icmp proxy by default
- 2022-09-20 TUN-6773: Add access based configuration to ingress.OriginRequestConfig
- 2022-09-19 TUN-6778: Cleanup logs about ICMP
- 2022-09-19 TUN-6779: cloudflared should also use the root CAs from system pool to validate edge certificate
- 2022-09-19 TUN-6780: Add support for certReload to also include support for client certificates
- 2022-09-16 TUN-6767: Build ICMP proxy for Windows only when CGO is enabled
- 2022-09-15 TUN-6590: Use Windows Teamcity agent to build binary
- 2022-09-13 TUN-6592: Decrement TTL and return ICMP time exceed if it's 0
- 2022-09-09 TUN-6749: Fix icmp_generic build
- 2022-09-09 TUN-6744: On posix platforms, assign unique echo ID per (src, dst, echo ID)
- 2022-09-08 TUN-6743: Support ICMPv6 echo on Windows
- 2022-09-08 TUN-6689: Utilize new RegisterUDPSession to begin tracing
- 2022-09-07 TUN-6688: Update RegisterUdpSession capnproto to include trace context
- 2022-09-06 TUN-6740: Detect no UDP packets allowed and fallback from QUIC in that case
- 2022-09-06 TUN-6654: Support ICMPv6 on Linux and Darwin
- 2022-09-02 TUN-6696: Refactor flow into funnel and close idle funnels
- 2022-09-02 TUN-6718: Bump go and go-boring 1.18.6
- 2022-08-29 TUN-6531: Implement ICMP proxy for Windows using IcmpSendEcho
- 2022-08-24 RTG-1339 Support post-quantum hybrid key exchange
2022.9.0
- 2022-09-05 TUN-6737: Fix datagramV2Type should be declared in its own block so it starts at 0
- 2022-09-01 TUN-6725: Fix testProxySSEAllData
- 2022-09-01 TUN-6726: Fix maxDatagramPayloadSize for Windows QUIC datagrams
- 2022-09-01 TUN-6729: Fix flaky TestClosePreviousProxies
- 2022-09-01 TUN-6728: Verify http status code ingress rule
- 2022-08-25 TUN-6695: Implement ICMP proxy for linux
2022.8.4
- 2022-08-31 TUN-6717: Update Github action to run with Go 1.19
- 2022-08-31 TUN-6720: Remove forcibly closing connection during reconnect signal
- 2022-08-29 Release 2022.8.3
2022.8.3
- 2022-08-26 TUN-6708: Fix replace flow logic
- 2022-08-25 TUN-6705: Tunnel should retry connections forever
- 2022-08-25 TUN-6704: Honor protocol flag when edge discovery is unreachable
- 2022-08-25 TUN-6699: Add metric for packet too big dropped
- 2022-08-24 TUN-6691: Properly error check for net.ErrClosed
- 2022-08-22 TUN-6679: Allow client side of quic request to close body
- 2022-08-22 TUN-6586: Change ICMP proxy to only build for Darwin and use echo ID to track flows
- 2022-08-18 TUN-6530: Implement ICMPv4 proxy
- 2022-08-17 TUN-6666: Define packet package
- 2022-08-17 TUN-6667: DatagramMuxerV2 provides a method to receive RawPacket
- 2022-08-16 TUN-6657: Ask for Tunnel ID and Configuration on Bug Report
- 2022-08-16 TUN-6676: Add suport for trailers in http2 connections
- 2022-08-11 TUN-6575: Consume cf-trace-id from incoming http2 TCP requests
2022.8.2
- 2022-08-16 TUN-6656: Docker for arm64 should not be deployed in an amd64 container
2022.8.1
- 2022-08-15 TUN-6617: Updated CHANGES.md for protocol stickiness
- 2022-08-12 EDGEPLAT-3918: bump go and go-boring to 1.18.5
- 2022-08-12 TUN-6652: Publish dockerfile for both amd64 and arm64
- 2022-08-11 TUN-6617: Dont fallback to http2 if QUIC conn was successful.
- 2022-08-11 TUN-6617: Dont fallback to http2 if QUIC conn was successful.
- 2022-08-11 Revert "TUN-6617: Dont fallback to http2 if QUIC conn was successful."
- 2022-08-11 TUN-6617: Dont fallback to http2 if QUIC conn was successful.
- 2022-08-01 TUN-6584: Define QUIC datagram v2 format to support proxying IP packets
2022.8.0
- 2022-08-10 TUN-6637: Upgrade quic-go
- 2022-08-10 TUN-6646: Add support to SafeStreamCloser to close only write side of stream
- 2022-08-09 TUN-6642: Fix unexpected close of quic stream triggered by upstream origin close
- 2022-08-09 TUN-6639: Validate cyclic ingress configuration
- 2022-08-08 TUN-6637: Upgrade go version and quic-go
- 2022-08-08 TUN-6639: Validate cyclic ingress configuration
- 2022-08-04 EDGEPLAT-3918: build cloudflared for Bookworm
- 2022-08-02 Revert "TUN-6576: Consume cf-trace-id from incoming TCP requests to create root span"
- 2022-07-27 TUN-6601: Update gopkg.in/yaml.v3 references in modules
- 2022-07-26 TUN-6576: Consume cf-trace-id from incoming TCP requests to create root span
- 2022-07-26 TUN-6576: Consume cf-trace-id from incoming TCP requests to create root span
- 2022-07-25 TUN-6598: Remove auto assignees on github issues
- 2022-07-20 TUN-6583: Remove legacy --ui flag
- 2022-07-20 cURL supports stdin and uses os pipes directly without copying
- 2022-07-07 TUN-6517: Use QUIC stream context while proxying HTTP requests and TCP connections
2022.7.1
- 2022-07-06 TUN-6503: Fix transport fallback from QUIC in face of dial error "no network activity"
2022.7.0
- 2022-07-05 TUN-6499: Remove log that is per datagram
- 2022-06-24 TUN-6460: Rename metric label location to edge_location
- 2022-06-24 TUN-6459: Add cloudflared user-agent to access calls
- 2022-06-17 TUN-6427: Differentiate between upstream request closed/canceled and failed origin requests
- 2022-06-17 TUN-6388: Fix first tunnel connection not retrying
- 2022-06-13 TUN-6384: Correct duplicate connection error to fetch new IP first
- 2022-06-13 TUN-6373: Add edge-ip-version to remotely pushed configuration
- 2022-06-07 TUN-6010: Add component tests for --edge-ip-version
- 2022-05-20 TUN-6007: Implement new edge discovery algorithm
- 2022-02-18 Ensure service install directories are created before writing file
2022.6.3
- 2022-06-20 TUN-6362: Add armhf support to cloudflare packaging
2022.6.2
- 2022-06-13 TUN-6381: Write error data on QUIC stream when we fail to talk to the origin; separate logging for protocol errors vs. origin errors.
- 2022-06-17 TUN-6414: Remove go-sumtype from cloudflared build process
- 2022-06-01 Add Http2Origin option to force HTTP/2 origin connections
- 2022-06-02 fix ingress rules unit test
- 2022-06-09 Update remaining OriginRequestConfig functions for Http2Origins
- 2022-05-31 Add image source label to docker container.
- 2022-05-10 Warp Private Network link updated
2022.6.1
- 2022-06-14 TUN-6395: Fix writing RPM repo data
2022.6.0
- 2022-06-14 Revert "TUN-6010: Add component tests for --edge-ip-version"
- 2022-06-14 Revert "TUN-6373: Add edge-ip-version to remotely pushed configuration"
- 2022-06-14 Revert "TUN-6384: Correct duplicate connection error to fetch new IP first"
- 2022-06-14 Revert "TUN-6007: Implement new edge discovery algorithm"
- 2022-06-13 TUN-6385: Don't share err between acceptStream loop and per-stream goroutines
- 2022-06-13 TUN-6384: Correct duplicate connection error to fetch new IP first
- 2022-06-13 TUN-6373: Add edge-ip-version to remotely pushed configuration
- 2022-06-13 TUN-6380: Enforce connect and keep-alive timeouts for TCP connections in both WARP routing and websocket based TCP proxy.
- 2022-06-11 Update issue templates
- 2022-06-11 Amendment to previous PR
- 2022-06-09 TUN-6347: Add TCP stream logs with FlowID
- 2022-06-08 TUN-6361: Add cloudflared arm builds to pkging as well
- 2022-06-07 TUN-6357: Add connector id to ready check endpoint
- 2022-06-07 TUN-6010: Add component tests for --edge-ip-version
- 2022-06-06 TUN-6191: Update quic-go to v0.27.1 and with custom patch to allow keep alive period to be configurable
- 2022-06-03 TUN-6343: Fix QUIC->HTTP2 fallback
- 2022-06-02 TUN-6339: Add config for IPv6 support
- 2022-06-02 TUN-6341: Fix default config value for edge-ip-version
- 2022-06-01 TUN-6323: Add Xenial and Trusty for Ubuntu pkging
- 2022-05-31 TUN-6210: Add cloudflared.repo to make it easy for yum installs
- 2022-05-30 TUN-6293: Update yaml v3 to latest hotfix
- 2022-05-20 TUN-6007: Implement new edge discovery algorithm
2022.5.3
- 2022-05-30 TUN-6308: Add debug logs to see if packets are sent/received from edge
- 2022-05-30 TUN-6301: Allow to update logger used by UDP session manager
2022.5.2
- 2022-05-23 TUN-6270: Import gpg keys from environment variables
- 2022-05-24 TUN-6209: Improve feedback process if release_pkgs to deb and rpm fail
- 2022-05-24 TUN-6280: Don't wrap qlog connection tracer for gatethering QUIC metrics since we're not writing qlog files.
- 2022-05-25 TUN-6209: Sign RPM packages
- 2022-05-25 TUN-6285: Upload pkg assets to repos when cloudflared is released.
- 2022-05-24 TUN-6282: Upgrade golang to 1.17.10, go-boring to 1.17.9
- 2022-05-26 TUN-6292: Debug builds for cloudflared
- 2022-05-28 TUN-6304: Fixed some file permission issues
- 2022-05-11 TUN-6197: Publish to brew core should not try to open the browser
- 2022-05-12 TUN-5943: Add RPM support
- 2022-05-18 TUN-6248: Fix panic in cloudflared during tracing when origin doesn't provide header map
- 2022-05-18 TUN-6250: Add upstream response status code to tracing span attributes
2022.5.1
- 2022-05-06 TUN-6146: Release_pkgs is now a generic command line script
- 2022-05-06 TUN-6185: Fix tcpOverWSOriginService not using original scheme for String representation
- 2022-05-05 TUN-6175: Simply debian packaging by structural upload
- 2022-05-05 TUN-5945: Added support for Ubuntu releases
- 2022-05-04 TUN-6054: Create and upload deb packages to R2
- 2022-05-03 TUN-6161: Set git user/email for brew core release
- 2022-05-03 TUN-6166: Fix mocked QUIC transport for UDP proxy manager to return expected error
- 2022-04-27 TUN-6016: Push local managed tunnels configuration to the edge
2022.5.0
- 2022-05-02 TUN-6158: Update golang.org/x/crypto
- 2022-04-20 VULN-8383 Bump yaml.v2 to yaml.v3
- 2022-04-21 TUN-6123: For a given connection with edge, close all datagram sessions through this connection when it's closed
- 2022-04-20 TUN-6015: Add RPC method for pushing local config
- 2022-04-21 TUN-6130: Fix vendoring due to case sensitive typo in package
- 2022-04-27 TUN-6142: Add tunnel details support to RPC
- 2022-04-28 TUN-6014: Add remote config flag as default feature
- 2022-04-12 TUN-6000: Another fix for publishing to brew core
- 2022-04-11 TUN-5990: Add otlp span export to response header
- 2022-04-19 TUN-6070: First connection retries other edge IPs if the error is quic timeout(likely due to firewall blocking UDP)
- 2022-04-11 TUN-6030: Add ttfb span for origin http request
2022.4.1
- 2022-04-11 TUN-6035: Reduce buffer size when proxying data
- 2022-04-11 TUN-6038: Reduce buffer size used for proxying data
- 2022-04-11 TUN-6043: Allow UI-managed Tunnels to fallback from QUIC but warn about that
- 2022-04-07 TUN-6000 add version argument to bump-formula-pr
- 2022-04-06 TUN-5989: Add in-memory otlp exporter
2022.4.0
- 2022-04-01 TUN-5973: Add backoff for non-recoverable errors as well
- 2022-04-05 TUN-5992: Use QUIC protocol for remotely managed tunnels when protocol is unspecified
- 2022-04-06 Update Makefile
- 2022-04-06 TUN-5995: Update prometheus to 1.12.1 to avoid vulnerabilities
- 2022-04-07 TUN-5995: Force prometheus v1.12.1 usage
- 2022-04-07 TUN-4130: cloudflared docker images now have a latest tag
- 2022-03-30 TUN-5842: Fix flaky TestConcurrentUpdateAndRead by making sure resources are released
- 2022-03-30 carrier: fix dropped errors
- 2022-03-25 TUN-5959: tidy go.mod
- 2022-03-25 TUN-5958: Fix release to homebrew core
- 2022-03-28 TUN-5960: Do not log the tunnel token or json credentials
- 2022-03-28 TUN-5956: Add timeout to session manager APIs
2022.3.4
- 2022-03-22 TUN-5918: Clean up text in cloudflared tunnel --help
- 2022-03-22 TUN-5895 run brew bump-formula-pr on release
- 2022-03-22 TUN-5915: New cloudflared command to allow to retrieve the token credentials for a Tunnel
- 2022-03-24 TUN-5933: Better messaging to help user when installing service if it is already installed
- 2022-03-25 TUN-5954: Start cloudflared service in Linux too similarly to other OSs
- 2022-03-14 TUN-5869: Add configuration endpoint in metrics server
2022.3.3
- 2022-03-17 TUN-5893: Start windows service on install, stop on uninstall. Previously user had to manually start the service after running 'cloudflared tunnel install' and stop the service before running uninstall command.
- 2022-03-17 Revert "CC-796: Remove dependency on unsupported version of go-oidc"
- 2022-03-18 TUN-5881: Clarify success (or lack thereof) of (un)installing cloudflared service
- 2022-03-18 CC-796: Remove dependency on unsupported version of go-oidc
- 2022-03-18 TUN-5907: Change notes for 2022.3.3
2022.3.2
- 2022-03-10 TUN-5833: Create constant for allow-remote-config
- 2022-03-15 TUN-5867: Return error if service was already installed
- 2022-03-16 TUN-5833: Send feature `allow_remote_config` if Tunnel is run with --token
- 2022-03-08 TUN-5849: Remove configuration debug log
- 2022-03-08 TUN-5850: Update CHANGES.md with latest releases
- 2022-03-08 TUN-5851: Update all references to point to Apache License 2.0
- 2022-03-07 TUN-5853 Add "install" make target and build package manager info into executable
- 2022-03-08 TUN-5801: Add custom wrapper for OriginConfig for JSON serde
- 2022-03-09 TUN-5703: Add prometheus metric for current configuration version
- 2022-02-05 CC-796: Remove dependency on unsupported version of go-oidc
2022.3.1
- 2022-03-04 TUN-5837: Log panic recovery in http2 logic with debug level log
- 2022-03-04 TUN-5696: HTTP/2 Configuration Update
- 2022-03-04 TUN-5836: Avoid websocket#Stream function from crashing cloudflared with unexpected memory access
- 2022-03-05 TUN-5836: QUIC transport no longer sets body to nil in any condition
2022.3.0
- 2022-03-02 TUN-5680: Adapt component tests for new service install based on token
- 2022-02-21 TUN-5682: Remove name field from credentials
- 2022-02-21 TUN-5681: Add support for running tunnel using Token
- 2022-02-28 TUN-5824: Update updater no-update-in-shell link
- 2022-02-28 TUN-5823: Warn about legacy flags that are ignored when ingress rules are used
- 2022-02-28 TUN-5737: Support https protocol over unix socket origin
- 2022-02-23 TUN-5679: Add support for service install using Tunnel Token
2022.2.2
- 2022-02-22 TUN-5754: Allow ingress validate to take plaintext option
- 2022-02-17 TUN-5678: Cloudflared uses typed tunnel API


@ -7,20 +7,19 @@ export CGO_ENABLED=0
# This controls the directory the built artifacts go into
export ARTIFACT_DIR=built_artifacts/
mkdir -p $ARTIFACT_DIR
windowsArchs=("amd64" "386")
export TARGET_OS=windows
for arch in ${windowsArchs[@]}; do
export TARGET_ARCH=$arch
make cloudflared-msi
mv ./cloudflared.exe $ARTIFACT_DIR/cloudflared-windows-$arch.exe
mv cloudflared-$VERSION-$arch.msi $ARTIFACT_DIR/cloudflared-windows-$arch.msi
done
linuxArchs=("386" "amd64" "arm" "arm64")
linuxArchs=("386" "amd64" "arm" "armhf" "arm64")
export TARGET_OS=linux
for arch in ${linuxArchs[@]}; do
unset TARGET_ARM
export TARGET_ARCH=$arch
## Support for armhf builds
if [[ $arch == armhf ]] ; then
export TARGET_ARCH=arm
export TARGET_ARM=7
fi
make cloudflared-deb
mv cloudflared\_$VERSION\_$arch.deb $ARTIFACT_DIR/cloudflared-linux-$arch.deb


@ -55,6 +55,9 @@ func createWebsocketStream(options *StartOptions, log *zerolog.Logger) (*cfwebso
}
dump, err := httputil.DumpRequest(req, false)
if err != nil {
return nil, err
}
log.Debug().Msgf("Websocket request: %s", string(dump))
dialer := &websocket.Dialer{
@ -182,6 +185,9 @@ func createAccessWebSocketStream(options *StartOptions, log *zerolog.Logger) (*w
}
dump, err := httputil.DumpRequest(req, false)
if err != nil {
return nil, nil, err
}
log.Debug().Msgf("Access Websocket request: %s", string(dump))
conn, resp, err := clientConnect(req, nil)


@ -7,6 +7,7 @@ import (
type TunnelClient interface {
CreateTunnel(name string, tunnelSecret []byte) (*TunnelWithToken, error)
GetTunnel(tunnelID uuid.UUID) (*Tunnel, error)
GetTunnelToken(tunnelID uuid.UUID) (string, error)
DeleteTunnel(tunnelID uuid.UUID) error
ListTunnels(filter *TunnelFilter) ([]*Tunnel, error)
ListActiveClients(tunnelID uuid.UUID) ([]*ActiveClient, error)

View File

@ -116,6 +116,23 @@ func (r *RESTClient) GetTunnel(tunnelID uuid.UUID) (*Tunnel, error) {
return nil, r.statusCodeToError("get tunnel", resp)
}
func (r *RESTClient) GetTunnelToken(tunnelID uuid.UUID) (token string, err error) {
endpoint := r.baseEndpoints.accountLevel
endpoint.Path = path.Join(endpoint.Path, fmt.Sprintf("%v/token", tunnelID))
resp, err := r.sendRequest("GET", endpoint, nil)
if err != nil {
return "", errors.Wrap(err, "REST request failed")
}
defer resp.Body.Close()
if resp.StatusCode == http.StatusOK {
err = parseResponse(resp.Body, &token)
return token, err
}
return "", r.statusCodeToError("get tunnel token", resp)
}
func (r *RESTClient) DeleteTunnel(tunnelID uuid.UUID) error {
endpoint := r.baseEndpoints.accountLevel
endpoint.Path = path.Join(endpoint.Path, fmt.Sprintf("%v", tunnelID))

27
cfio/copy.go Normal file
View File

@ -0,0 +1,27 @@
package cfio
import (
"io"
"sync"
)
const defaultBufferSize = 16 * 1024
var bufferPool = sync.Pool{
New: func() interface{} {
return make([]byte, defaultBufferSize)
},
}
func Copy(dst io.Writer, src io.Reader) (written int64, err error) {
_, okWriteTo := src.(io.WriterTo)
_, okReadFrom := dst.(io.ReaderFrom)
var buffer []byte = nil
if !(okWriteTo || okReadFrom) {
buffer = bufferPool.Get().([]byte)
defer bufferPool.Put(buffer)
}
return io.CopyBuffer(dst, src, buffer)
}
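
A note on this new helper: `Copy` only borrows a pooled 16 KiB buffer when neither endpoint can stream on its own (`io.WriterTo` / `io.ReaderFrom`); otherwise it lets `io.CopyBuffer` delegate with a nil buffer. A minimal usage sketch (the TCP target and request are placeholders, not anything from this repo):

```go
package main

import (
	"log"
	"net"
	"os"

	"github.com/cloudflare/cloudflared/cfio"
)

func main() {
	// Placeholder origin; any TCP endpoint works for the sake of the sketch.
	conn, err := net.Dial("tcp", "example.com:80")
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	if _, err := conn.Write([]byte("GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")); err != nil {
		log.Fatal(err)
	}

	// cfio.Copy streams the response to stdout. Because *os.File implements
	// io.ReaderFrom in recent Go releases, the pooled buffer is typically
	// skipped here and io.CopyBuffer uses the ReadFrom fast path instead.
	if _, err := cfio.Copy(os.Stdout, conn); err != nil {
		log.Fatal(err)
	}
}
```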

View File

@ -1,8 +1,8 @@
pinned_go: &pinned_go go=1.17.5-1
pinned_go_fips: &pinned_go_fips go-boring=1.17.5-1
pinned_go: &pinned_go go=1.18.6-1
pinned_go_fips: &pinned_go_fips go-boring=1.18.6-1
build_dir: &build_dir /cfsetup_build
default-flavor: buster
default-flavor: bullseye
stretch: &stretch
build:
build_dir: *build_dir
@ -40,16 +40,22 @@ stretch: &stretch
- libffi-dev
- python3-setuptools
- python3-pip
- reprepro
- createrepo
pre-cache: &github_release_pkgs_pre_cache
- wget https://github.com/sudarshan-reddy/msitools/releases/download/v0.101b/wixl -P /usr/local/bin
- chmod a+x /usr/local/bin/wixl
- pip3 install pynacl==1.4.0
- pip3 install pygithub==1.55
- pip3 install boto3==1.22.9
- pip3 install python-gnupg==0.4.9
post-cache:
# build all packages (except macos and FIPS) and move them to /cfsetup/built_artifacts
- ./build-packages.sh
# release the packages built and moved to /cfsetup/built_artifacts
- make github-release-built-pkgs
# publish packages to linux repos
- make release-pkgs-linux
# handle FIPS separately so that we build with the gofips compiler
github-fips-release-pkgs:
build_dir: *build_dir
@ -72,6 +78,12 @@ stretch: &stretch
# same logic as above, but for FIPS packages only
- ./build-packages-fips.sh
- make github-release-built-pkgs
generate-versions-file:
build_dir: *build_dir
builddeps:
- build-essential
post-cache:
- make generate-docker-version
build-deb:
build_dir: *build_dir
builddeps: &build_deb_deps
@ -113,21 +125,9 @@ stretch: &stretch
- export GOOS=linux
- export GOARCH=arm64
- make cloudflared-deb
publish-deb:
build_dir: *build_dir
builddeps:
- *pinned_go
- build-essential
- fakeroot
- rubygem-fpm
- openssh-client
post-cache:
- export GOOS=linux
- export GOARCH=amd64
- make publish-deb
github-release-macos-amd64:
build_dir: *build_dir
builddeps:
builddeps: &build_pygithub
- *pinned_go
- build-essential
- python3-dev
@ -139,6 +139,27 @@ stretch: &stretch
- pip3 install pygithub==1.55
post-cache:
- make github-mac-upload
github-release-windows:
build_dir: *build_dir
builddeps:
- *pinned_go
- build-essential
- python3-dev
- libffi-dev
- python3-setuptools
- python3-pip
- wget
# libmsi and libgcab are libraries the wixl binary depends on.
- libmsi-dev
- libgcab-dev
pre-cache:
- wget https://github.com/sudarshan-reddy/msitools/releases/download/v0.101b/wixl -P /usr/local/bin
- chmod a+x /usr/local/bin/wixl
- pip3 install pynacl==1.4.0
- pip3 install pygithub==1.55
post-cache:
- .teamcity/package-windows.sh
- make github-windows-upload
test:
build_dir: *build_dir
builddeps:
@ -146,8 +167,7 @@ stretch: &stretch
- build-essential
- gotest-to-teamcity
pre-cache: &test_pre_cache
- go get golang.org/x/tools/cmd/goimports
- go get github.com/sudarshan-reddy/go-sumtype@v0.0.0-20210827105221-82eca7e5abb1
- go install golang.org/x/tools/cmd/goimports@latest
post-cache:
- export GOOS=linux
- export GOARCH=amd64
@ -169,6 +189,26 @@ stretch: &stretch
- ./fmt-check.sh
- make test | gotest-to-teamcity
component-test:
build_dir: *build_dir
builddeps:
- *pinned_go
- python3.7
- python3-pip
- python3-setuptools
# procps installs the ps command which is needed in test_sysv_service because the init script
# uses ps pid to determine if the agent is running
- procps
pre-cache-copy-paths:
- component-tests/requirements.txt
pre-cache: &component_test_pre_cache
- sudo pip3 install --upgrade -r component-tests/requirements.txt
post-cache: &component_test_post_cache
# Creates and routes a Named Tunnel for this build. Also constructs config file from env vars.
- python3 component-tests/setup.py --type create
- pytest component-tests -o log_cli=true --log-cli-level=INFO
# The Named Tunnel is deleted and its route unprovisioned here.
- python3 component-tests/setup.py --type cleanup
component-test-fips:
build_dir: *build_dir
builddeps:
- *pinned_go_fips
@ -180,29 +220,21 @@ stretch: &stretch
- procps
pre-cache-copy-paths:
- component-tests/requirements.txt
pre-cache:
- sudo pip3 install --upgrade -r component-tests/requirements.txt
post-cache:
# Creates and routes a Named Tunnel for this build. Also constructs config file from env vars.
- python3 component-tests/setup.py --type create
- pytest component-tests -o log_cli=true --log-cli-level=INFO
# The Named Tunnel is deleted and its route unprovisioned here.
- python3 component-tests/setup.py --type cleanup
pre-cache: *component_test_pre_cache
post-cache: *component_test_post_cache
update-homebrew:
builddeps:
- openssh-client
- s3cmd
- jq
- build-essential
- procps
post-cache:
- .teamcity/update-homebrew.sh
- .teamcity/update-homebrew-core.sh
github-message-release:
build_dir: *build_dir
builddeps:
- *pinned_go
- build-essential
- python3-dev
- libffi-dev
- python3-setuptools
- python3-pip
builddeps: *build_pygithub
pre-cache: *install_pygithub
post-cache:
- make github-message
@ -213,13 +245,13 @@ stretch: &stretch
- build-essential
- python3
- genisoimage
- jetez
pre-cache:
- ln -s /usr/bin/genisoimage /usr/bin/mkisofs
post-cache:
- export CGO_ENABLED=0
- export GOOS=freebsd
- export GOARCH=amd64
- make cloudflared-junos
- make cloudflared
publish-junos:
build_dir: *build_dir
builddeps:
@ -238,18 +270,4 @@ stretch: &stretch
buster: *stretch
bullseye: *stretch
centos-7:
publish-rpm:
build_dir: *build_dir
builddeps: &el7_builddeps
- https://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
pre-cache:
- yum install -y fakeroot
- yum upgrade -y binutils-2.27-44.base.el7.x86_64
- wget https://go.dev/dl/go1.17.5.linux-amd64.tar.gz -P /tmp/
- tar -C /usr/local -xzf /tmp/go1.17.5.linux-amd64.tar.gz
post-cache:
- export PATH=$PATH:/usr/local/go/bin
- export GOOS=linux
- export GOARCH=amd64
- make publish-rpm
bookworm: *stretch

View File

@ -56,11 +56,13 @@ const sentryDSN = "https://56a9c9fa5c364ab28f34b14f35ea0f1b@sentry.io/189878"
var (
shutdownC chan struct{}
userAgent = "DEV"
)
// Init will initialize and store vars from the main program
func Init(shutdown chan struct{}) {
func Init(shutdown chan struct{}, version string) {
shutdownC = shutdown
userAgent = fmt.Sprintf("cloudflared/%s", version)
}
// Flags return the global flags for Access related commands (hopefully none)
@ -294,6 +296,7 @@ func curl(c *cli.Context) error {
// run kicks off a shell task and pipe the results to the respective std pipes
func run(cmd string, args ...string) error {
c := exec.Command(cmd, args...)
c.Stdin = os.Stdin
stderr, err := c.StderrPipe()
if err != nil {
return err
@ -505,7 +508,7 @@ func isTokenValid(options *carrier.StartOptions, log *zerolog.Logger) (bool, err
if err != nil {
return false, errors.Wrap(err, "Could not create access request")
}
req.Header.Set("User-Agent", userAgent)
// Do not follow redirects
client := &http.Client{
CheckRedirect: func(req *http.Request, via []*http.Request) error {

View File

@ -0,0 +1,30 @@
package main
import (
"github.com/rs/zerolog"
"github.com/urfave/cli/v2"
"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
"github.com/cloudflare/cloudflared/cmd/cloudflared/tunnel"
)
func buildArgsForToken(c *cli.Context, log *zerolog.Logger) ([]string, error) {
token := c.Args().First()
if _, err := tunnel.ParseToken(token); err != nil {
return nil, cliutil.UsageError("Provided tunnel token is not valid (%s).", err)
}
return []string{
"tunnel", "run", "--token", token,
}, nil
}
func getServiceExtraArgsFromCliArgs(c *cli.Context, log *zerolog.Logger) ([]string, error) {
if c.NArg() > 0 {
// currently, we only support extra args for token
return buildArgsForToken(c, log)
} else {
// empty extra args
return make([]string, 0), nil
}
}

View File

@ -6,7 +6,6 @@ package main
import (
"fmt"
"os"
"path/filepath"
"github.com/rs/zerolog"
"github.com/urfave/cli/v2"
@ -20,22 +19,16 @@ import (
func runApp(app *cli.App, graceShutdownC chan struct{}) {
app.Commands = append(app.Commands, &cli.Command{
Name: "service",
Usage: "Manages the Cloudflare Tunnel system service",
Usage: "Manages the cloudflared system service",
Subcommands: []*cli.Command{
{
Name: "install",
Usage: "Install Cloudflare Tunnel as a system service",
Usage: "Install cloudflared as a system service",
Action: cliutil.ConfiguredAction(installLinuxService),
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "legacy",
Usage: "Generate service file for non-named tunnels",
},
},
},
{
Name: "uninstall",
Usage: "Uninstall the Cloudflare Tunnel service",
Usage: "Uninstall the cloudflared service",
Action: cliutil.ConfiguredAction(uninstallLinuxService),
},
},
@ -50,19 +43,20 @@ const (
serviceConfigFile = "config.yml"
serviceCredentialFile = "cert.pem"
serviceConfigPath = serviceConfigDir + "/" + serviceConfigFile
cloudflaredService = "cloudflared.service"
)
var systemdTemplates = []ServiceTemplate{
{
Path: "/etc/systemd/system/cloudflared.service",
Path: fmt.Sprintf("/etc/systemd/system/%s", cloudflaredService),
Content: `[Unit]
Description=Cloudflare Tunnel
Description=cloudflared
After=network.target
[Service]
TimeoutStartSec=0
Type=notify
ExecStart={{ .Path }} --config /etc/cloudflared/config.yml --no-autoupdate{{ range .ExtraArgs }} {{ . }}{{ end }}
ExecStart={{ .Path }} --no-autoupdate{{ range .ExtraArgs }} {{ . }}{{ end }}
Restart=on-failure
RestartSec=5s
@ -73,7 +67,7 @@ WantedBy=multi-user.target
{
Path: "/etc/systemd/system/cloudflared-update.service",
Content: `[Unit]
Description=Update Cloudflare Tunnel
Description=Update cloudflared
After=network.target
[Service]
@ -83,7 +77,7 @@ ExecStart=/bin/bash -c '{{ .Path }} update; code=$?; if [ $code -eq 11 ]; then s
{
Path: "/etc/systemd/system/cloudflared-update.timer",
Content: `[Unit]
Description=Update Cloudflare Tunnel
Description=Update cloudflared
[Timer]
OnCalendar=daily
@ -100,7 +94,7 @@ var sysvTemplate = ServiceTemplate{
Content: `#!/bin/sh
# For RedHat and cousins:
# chkconfig: 2345 99 01
# description: Cloudflare Tunnel agent
# description: cloudflared
# processname: {{.Path}}
### BEGIN INIT INFO
# Provides: {{.Path}}
@ -108,11 +102,11 @@ var sysvTemplate = ServiceTemplate{
# Required-Stop:
# Default-Start: 2 3 4 5
# Default-Stop: 0 1 6
# Short-Description: Cloudflare Tunnel
# Description: Cloudflare Tunnel agent
# Short-Description: cloudflared
# Description: cloudflared agent
### END INIT INFO
name=$(basename $(readlink -f $0))
cmd="{{.Path}} --config /etc/cloudflared/config.yml --pidfile /var/run/$name.pid --autoupdate-freq 24h0m0s{{ range .ExtraArgs }} {{ . }}{{ end }}"
cmd="{{.Path}} --pidfile /var/run/$name.pid --autoupdate-freq 24h0m0s{{ range .ExtraArgs }} {{ . }}{{ end }}"
pid_file="/var/run/$name.pid"
stdout_log="/var/log/$name.log"
stderr_log="/var/log/$name.err"
@ -191,27 +185,6 @@ func isSystemd() bool {
return false
}
func copyUserConfiguration(userConfigDir, userConfigFile, userCredentialFile string, log *zerolog.Logger) error {
srcCredentialPath := filepath.Join(userConfigDir, userCredentialFile)
destCredentialPath := filepath.Join(serviceConfigDir, serviceCredentialFile)
if srcCredentialPath != destCredentialPath {
if err := copyCredential(srcCredentialPath, destCredentialPath); err != nil {
return err
}
}
srcConfigPath := filepath.Join(userConfigDir, userConfigFile)
destConfigPath := filepath.Join(serviceConfigDir, serviceConfigFile)
if srcConfigPath != destConfigPath {
if err := copyConfig(srcConfigPath, destConfigPath); err != nil {
return err
}
log.Info().Msgf("Copied %s to %s", srcConfigPath, destConfigPath)
}
return nil
}
func installLinuxService(c *cli.Context) error {
log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
@ -223,61 +196,69 @@ func installLinuxService(c *cli.Context) error {
Path: etPath,
}
if err := ensureConfigDirExists(serviceConfigDir); err != nil {
var extraArgsFunc func(c *cli.Context, log *zerolog.Logger) ([]string, error)
if c.NArg() == 0 {
extraArgsFunc = buildArgsForConfig
} else {
extraArgsFunc = buildArgsForToken
}
extraArgs, err := extraArgsFunc(c, log)
if err != nil {
return err
}
if c.Bool("legacy") {
userConfigDir := filepath.Dir(c.String("config"))
userConfigFile := filepath.Base(c.String("config"))
userCredentialFile := config.DefaultCredentialFile
if err = copyUserConfiguration(userConfigDir, userConfigFile, userCredentialFile, log); err != nil {
log.Err(err).Msgf("Failed to copy user configuration. Before running the service, ensure that %s contains two files, %s and %s",
serviceConfigDir, serviceCredentialFile, serviceConfigFile)
return err
}
templateArgs.ExtraArgs = []string{
"--origincert", serviceConfigDir + "/" + serviceCredentialFile,
}
} else {
src, _, err := config.ReadConfigFile(c, log)
if err != nil {
return err
}
// can't use context because this command doesn't define "credentials-file" flag
configPresent := func(s string) bool {
val, err := src.String(s)
return err == nil && val != ""
}
if src.TunnelID == "" || !configPresent(tunnel.CredFileFlag) {
return fmt.Errorf(`Configuration file %s must contain entries for the tunnel to run and its associated credentials:
tunnel: TUNNEL-UUID
credentials-file: CREDENTIALS-FILE
`, src.Source())
}
if src.Source() != serviceConfigPath {
if exists, err := config.FileExists(serviceConfigPath); err != nil || exists {
return fmt.Errorf("Possible conflicting configuration in %[1]s and %[2]s. Either remove %[2]s or run `cloudflared --config %[2]s service install`", src.Source(), serviceConfigPath)
}
if err := copyFile(src.Source(), serviceConfigPath); err != nil {
return fmt.Errorf("failed to copy %s to %s: %w", src.Source(), serviceConfigPath, err)
}
}
templateArgs.ExtraArgs = []string{
"tunnel", "run",
}
}
templateArgs.ExtraArgs = extraArgs
switch {
case isSystemd():
log.Info().Msgf("Using Systemd")
return installSystemd(&templateArgs, log)
err = installSystemd(&templateArgs, log)
default:
log.Info().Msgf("Using SysV")
return installSysv(&templateArgs, log)
err = installSysv(&templateArgs, log)
}
if err == nil {
log.Info().Msg("Linux service for cloudflared installed successfully")
}
return err
}
func buildArgsForConfig(c *cli.Context, log *zerolog.Logger) ([]string, error) {
if err := ensureConfigDirExists(serviceConfigDir); err != nil {
return nil, err
}
src, _, err := config.ReadConfigFile(c, log)
if err != nil {
return nil, err
}
// can't use context because this command doesn't define "credentials-file" flag
configPresent := func(s string) bool {
val, err := src.String(s)
return err == nil && val != ""
}
if src.TunnelID == "" || !configPresent(tunnel.CredFileFlag) {
return nil, fmt.Errorf(`Configuration file %s must contain entries for the tunnel to run and its associated credentials:
tunnel: TUNNEL-UUID
credentials-file: CREDENTIALS-FILE
`, src.Source())
}
if src.Source() != serviceConfigPath {
if exists, err := config.FileExists(serviceConfigPath); err != nil || exists {
return nil, fmt.Errorf("Possible conflicting configuration in %[1]s and %[2]s. Either remove %[2]s or run `cloudflared --config %[2]s service install`", src.Source(), serviceConfigPath)
}
if err := copyFile(src.Source(), serviceConfigPath); err != nil {
return nil, fmt.Errorf("failed to copy %s to %s: %w", src.Source(), serviceConfigPath, err)
}
}
return []string{
"--config", "/etc/cloudflared/config.yml", "tunnel", "run",
}, nil
}
func installSystemd(templateArgs *ServiceTemplateArgs, log *zerolog.Logger) error {
@ -288,16 +269,19 @@ func installSystemd(templateArgs *ServiceTemplateArgs, log *zerolog.Logger) erro
return err
}
}
if err := runCommand("systemctl", "enable", "cloudflared.service"); err != nil {
log.Err(err).Msg("systemctl enable cloudflared.service error")
if err := runCommand("systemctl", "enable", cloudflaredService); err != nil {
log.Err(err).Msgf("systemctl enable %s error", cloudflaredService)
return err
}
if err := runCommand("systemctl", "start", "cloudflared-update.timer"); err != nil {
log.Err(err).Msg("systemctl start cloudflared-update.timer error")
return err
}
log.Info().Msg("systemctl daemon-reload")
return runCommand("systemctl", "daemon-reload")
if err := runCommand("systemctl", "daemon-reload"); err != nil {
log.Err(err).Msg("systemctl daemon-reload error")
return err
}
return runCommand("systemctl", "start", cloudflaredService)
}
func installSysv(templateArgs *ServiceTemplateArgs, log *zerolog.Logger) error {
@ -320,25 +304,35 @@ func installSysv(templateArgs *ServiceTemplateArgs, log *zerolog.Logger) error {
continue
}
}
return nil
return runCommand("service", "cloudflared", "start")
}
func uninstallLinuxService(c *cli.Context) error {
log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
var err error
switch {
case isSystemd():
log.Info().Msg("Using Systemd")
return uninstallSystemd(log)
err = uninstallSystemd(log)
default:
log.Info().Msg("Using SysV")
return uninstallSysv(log)
err = uninstallSysv(log)
}
if err == nil {
log.Info().Msg("Linux service for cloudflared uninstalled successfully")
}
return err
}
func uninstallSystemd(log *zerolog.Logger) error {
if err := runCommand("systemctl", "disable", "cloudflared.service"); err != nil {
log.Err(err).Msg("systemctl disable cloudflared.service error")
if err := runCommand("systemctl", "disable", cloudflaredService); err != nil {
log.Err(err).Msgf("systemctl disable %s error", cloudflaredService)
return err
}
if err := runCommand("systemctl", "stop", cloudflaredService); err != nil {
log.Err(err).Msgf("systemctl stop %s error", cloudflaredService)
return err
}
if err := runCommand("systemctl", "stop", "cloudflared-update.timer"); err != nil {
@ -351,11 +345,18 @@ func uninstallSystemd(log *zerolog.Logger) error {
return err
}
}
log.Info().Msgf("Successfully uninstalled cloudflared service from systemd")
if err := runCommand("systemctl", "daemon-reload"); err != nil {
log.Err(err).Msg("systemctl daemon-reload error")
return err
}
return nil
}
func uninstallSysv(log *zerolog.Logger) error {
if err := runCommand("service", "cloudflared", "stop"); err != nil {
log.Err(err).Msg("service cloudflared stop error")
return err
}
if err := sysvTemplate.Remove(); err != nil {
log.Err(err).Msg("error removing service template")
return err
@ -370,6 +371,5 @@ func uninstallSysv(log *zerolog.Logger) error {
continue
}
}
log.Info().Msgf("Successfully uninstalled cloudflared service from sysv")
return nil
}
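
To make the effect of the args builders above concrete, here is a small self-contained sketch that renders the same style of systemd `ExecStart` line as the unit template shown earlier, once with the token-derived args from `buildArgsForToken` and once with the config-file args from `buildArgsForConfig` (the binary path and the token are placeholders):

```go
package main

import (
	"os"
	"text/template"
)

type serviceTemplateArgs struct {
	Path      string
	ExtraArgs []string
}

func main() {
	// Same interpolation as the systemd unit template in linux_service.go.
	const execStart = "ExecStart={{ .Path }} --no-autoupdate{{ range .ExtraArgs }} {{ . }}{{ end }}\n"
	tmpl := template.Must(template.New("execstart").Parse(execStart))

	tokenInstall := serviceTemplateArgs{
		Path:      "/usr/bin/cloudflared",
		ExtraArgs: []string{"tunnel", "run", "--token", "<TUNNEL-TOKEN>"},
	}
	configInstall := serviceTemplateArgs{
		Path:      "/usr/bin/cloudflared",
		ExtraArgs: []string{"--config", "/etc/cloudflared/config.yml", "tunnel", "run"},
	}

	// Prints:
	// ExecStart=/usr/bin/cloudflared --no-autoupdate tunnel run --token <TUNNEL-TOKEN>
	// ExecStart=/usr/bin/cloudflared --no-autoupdate --config /etc/cloudflared/config.yml tunnel run
	_ = tmpl.Execute(os.Stdout, tokenInstall)
	_ = tmpl.Execute(os.Stdout, configInstall)
}
```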

View File

@ -21,16 +21,16 @@ const (
func runApp(app *cli.App, graceShutdownC chan struct{}) {
app.Commands = append(app.Commands, &cli.Command{
Name: "service",
Usage: "Manages the Cloudflare Tunnel launch agent",
Usage: "Manages the cloudflared launch agent",
Subcommands: []*cli.Command{
{
Name: "install",
Usage: "Install Cloudflare Tunnel as an user launch agent",
Usage: "Install cloudflared as an user launch agent",
Action: cliutil.ConfiguredAction(installLaunchd),
},
{
Name: "uninstall",
Usage: "Uninstall the Cloudflare Tunnel launch agent",
Usage: "Uninstall the cloudflared launch agent",
Action: cliutil.ConfiguredAction(uninstallLaunchd),
},
},
@ -50,6 +50,9 @@ func newLaunchdTemplate(installPath, stdoutPath, stderrPath string) *ServiceTemp
<key>ProgramArguments</key>
<array>
<string>{{ .Path }}</string>
{{- range $i, $item := .ExtraArgs}}
<string>{{ $item }}</string>
{{- end}}
</array>
<key>RunAtLoad</key>
<true/>
@ -111,12 +114,12 @@ func installLaunchd(c *cli.Context) error {
log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
if isRootUser() {
log.Info().Msg("Installing Cloudflare Tunnel client as a system launch daemon. " +
"Cloudflare Tunnel client will run at boot")
log.Info().Msg("Installing cloudflared client as a system launch daemon. " +
"cloudflared client will run at boot")
} else {
log.Info().Msg("Installing Cloudflare Tunnel client as an user launch agent. " +
"Note that Cloudflare Tunnel client will only run when the user is logged in. " +
"If you want to run Cloudflare Tunnel client at boot, install with root permission. " +
log.Info().Msg("Installing cloudflared client as an user launch agent. " +
"Note that cloudflared client will only run when the user is logged in. " +
"If you want to run cloudflared client at boot, install with root permission. " +
"For more information, visit https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/run-tunnel/run-as-service")
}
etPath, err := os.Executable()
@ -129,6 +132,13 @@ func installLaunchd(c *cli.Context) error {
log.Err(err).Msg("Error determining install path")
return errors.Wrap(err, "Error determining install path")
}
extraArgs, err := getServiceExtraArgsFromCliArgs(c, log)
if err != nil {
errMsg := "Unable to determine extra arguments for launch daemon"
log.Err(err).Msg(errMsg)
return errors.Wrap(err, errMsg)
}
stdoutPath, err := stdoutPath()
if err != nil {
log.Err(err).Msg("error determining stdout path")
@ -140,7 +150,7 @@ func installLaunchd(c *cli.Context) error {
return errors.Wrap(err, "error determining stderr path")
}
launchdTemplate := newLaunchdTemplate(installPath, stdoutPath, stderrPath)
templateArgs := ServiceTemplateArgs{Path: etPath}
templateArgs := ServiceTemplateArgs{Path: etPath, ExtraArgs: extraArgs}
err = launchdTemplate.Generate(&templateArgs)
if err != nil {
log.Err(err).Msg("error generating launchd template")
@ -153,16 +163,20 @@ func installLaunchd(c *cli.Context) error {
}
log.Info().Msgf("Outputs are logged to %s and %s", stderrPath, stdoutPath)
return runCommand("launchctl", "load", plistPath)
err = runCommand("launchctl", "load", plistPath)
if err == nil {
log.Info().Msg("MacOS service for cloudflared installed successfully")
}
return err
}
func uninstallLaunchd(c *cli.Context) error {
log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
if isRootUser() {
log.Info().Msg("Uninstalling Cloudflare Tunnel as a system launch daemon")
log.Info().Msg("Uninstalling cloudflared as a system launch daemon")
} else {
log.Info().Msg("Uninstalling Cloudflare Tunnel as an user launch agent")
log.Info().Msg("Uninstalling cloudflared as a user launch agent")
}
installPath, err := installPath()
if err != nil {
@ -184,10 +198,13 @@ func uninstallLaunchd(c *cli.Context) error {
}
err = runCommand("launchctl", "unload", plistPath)
if err != nil {
log.Err(err).Msg("error unloading")
log.Err(err).Msg("error unloading launchd")
return err
}
log.Info().Msgf("Outputs are logged to %s and %s", stderrPath, stdoutPath)
return launchdTemplate.Remove()
err = launchdTemplate.Remove()
if err == nil {
log.Info().Msg("Launchd for cloudflared was uninstalled successfully")
}
return err
}
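
The macOS counterpart works the same way, except the extra args are emitted as individual `<string>` entries inside the plist `ProgramArguments` array. A quick sketch of that fragment being rendered (values are placeholders):

```go
package main

import (
	"os"
	"text/template"
)

func main() {
	// Mirrors the ProgramArguments portion of the launchd plist template above.
	const programArgs = `<key>ProgramArguments</key>
<array>
	<string>{{ .Path }}</string>
{{- range $i, $item := .ExtraArgs}}
	<string>{{ $item }}</string>
{{- end}}
</array>
`
	data := struct {
		Path      string
		ExtraArgs []string
	}{
		Path:      "/usr/local/bin/cloudflared",
		ExtraArgs: []string{"tunnel", "run", "--token", "<TUNNEL-TOKEN>"},
	}
	tmpl := template.Must(template.New("plist").Parse(programArgs))
	_ = tmpl.Execute(os.Stdout, data) // each extra arg becomes its own <string> element
}
```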

View File

@ -21,6 +21,8 @@ import (
"github.com/cloudflare/cloudflared/logger"
"github.com/cloudflare/cloudflared/metrics"
"github.com/cloudflare/cloudflared/overwatch"
"github.com/cloudflare/cloudflared/token"
"github.com/cloudflare/cloudflared/tracing"
"github.com/cloudflare/cloudflared/watcher"
)
@ -69,7 +71,7 @@ func main() {
app.Copyright = fmt.Sprintf(
`(c) %d Cloudflare Inc.
Your installation of cloudflared software constitutes a symbol of your signature indicating that you accept
the terms of the Cloudflare License (https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/license),
the terms of the Apache License Version 2.0 (https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/license),
Terms (https://www.cloudflare.com/terms/) and Privacy Policy (https://www.cloudflare.com/privacypolicy/).`,
time.Now().Year(),
)
@ -84,8 +86,10 @@ func main() {
app.Commands = commands(cli.ShowVersion)
tunnel.Init(bInfo, graceShutdownC) // we need this to support the tunnel sub command...
access.Init(graceShutdownC)
access.Init(graceShutdownC, Version)
updater.Init(Version)
tracing.Init(Version)
token.Init(Version)
runApp(app, graceShutdownC)
}

View File

@ -73,7 +73,7 @@ func Run(c *cli.Context) error {
log.Fatal().Err(err).Msg("Failed to open the metrics listener")
}
go metrics.ServeMetrics(metricsListener, nil, nil, "", log)
go metrics.ServeMetrics(metricsListener, nil, nil, "", nil, log)
listener, err := tunneldns.CreateListener(
c.String("address"),

View File

@ -8,6 +8,7 @@ import (
"io/ioutil"
"os"
"os/exec"
"path"
"text/template"
homedir "github.com/mitchellh/go-homedir"
@ -43,15 +44,26 @@ func (st *ServiceTemplate) Generate(args *ServiceTemplateArgs) error {
if err != nil {
return err
}
if _, err = os.Stat(resolvedPath); err == nil {
return fmt.Errorf(serviceAlreadyExistsWarn(resolvedPath))
}
var buffer bytes.Buffer
err = tmpl.Execute(&buffer, args)
if err != nil {
return fmt.Errorf("error generating %s: %v", st.Path, err)
}
fileMode := os.FileMode(0644)
fileMode := os.FileMode(0o644)
if st.FileMode != 0 {
fileMode = st.FileMode
}
plistFolder := path.Dir(resolvedPath)
err = os.MkdirAll(plistFolder, 0o755)
if err != nil {
return fmt.Errorf("error creating %s: %v", plistFolder, err)
}
err = ioutil.WriteFile(resolvedPath, buffer.Bytes(), fileMode)
if err != nil {
return fmt.Errorf("error writing %s: %v", resolvedPath, err)
@ -71,6 +83,15 @@ func (st *ServiceTemplate) Remove() error {
return nil
}
func serviceAlreadyExistsWarn(service string) string {
return fmt.Sprintf("cloudflared service is already installed at %s; if you are running a cloudflared tunnel, you "+
"can point it to multiple origins, avoiding the need to run more than one cloudflared service in the "+
"same machine; otherwise if you are really sure, you can do `cloudflared service uninstall` to clean "+
"up the existing service and then try again this command",
service,
)
}
func runCommand(command string, args ...string) error {
cmd := exec.Command(command, args...)
stderr, err := cmd.StderrPipe()
@ -82,10 +103,10 @@ func runCommand(command string, args ...string) error {
return fmt.Errorf("error starting %s: %v", command, err)
}
_, _ = ioutil.ReadAll(stderr)
output, _ := ioutil.ReadAll(stderr)
err = cmd.Wait()
if err != nil {
return fmt.Errorf("%s returned with error: %v", command, err)
return fmt.Errorf("%s %v returned with error code %v due to: %v", command, args, err, string(output))
}
return nil
}
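
The change above surfaces a failing command's stderr in the returned error. An equivalent, slightly more compact variant (a sketch of mine, not what this code does) uses `CombinedOutput`, which also captures stdout:

```go
package main

import (
	"fmt"
	"os/exec"
)

// runCommand variant: CombinedOutput captures stdout and stderr together, so
// whatever the failing command printed ends up in the error message.
func runCommand(command string, args ...string) error {
	out, err := exec.Command(command, args...).CombinedOutput()
	if err != nil {
		return fmt.Errorf("%s %v returned with error: %v due to: %s", command, args, err, out)
	}
	return nil
}

func main() {
	if err := runCommand("systemctl", "is-active", "cloudflared.service"); err != nil {
		fmt.Println(err)
	}
}
```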

View File

@ -15,6 +15,7 @@ import (
"github.com/coreos/go-systemd/daemon"
"github.com/facebookgo/grace/gracenet"
"github.com/getsentry/raven-go"
"github.com/google/uuid"
homedir "github.com/mitchellh/go-homedir"
"github.com/pkg/errors"
"github.com/rs/zerolog"
@ -24,7 +25,6 @@ import (
"github.com/cloudflare/cloudflared/cfapi"
"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
"github.com/cloudflare/cloudflared/cmd/cloudflared/proxydns"
"github.com/cloudflare/cloudflared/cmd/cloudflared/ui"
"github.com/cloudflare/cloudflared/cmd/cloudflared/updater"
"github.com/cloudflare/cloudflared/config"
"github.com/cloudflare/cloudflared/connection"
@ -109,6 +109,7 @@ func Commands() []*cli.Command {
buildIngressSubcommand(),
buildDeleteCommand(),
buildCleanupCommand(),
buildTokenCommand(),
// for compatibility, allow following as tunnel subcommands
proxydns.Command(true),
cliutil.RemovedCommand("db-connect"),
@ -127,26 +128,34 @@ func buildTunnelCommand(subcommands []*cli.Command) *cli.Command {
Name: "tunnel",
Action: cliutil.ConfiguredAction(TunnelCommand),
Category: "Tunnel",
Usage: "Make a locally-running web service accessible over the internet using Cloudflare Tunnel.",
Usage: "Use Cloudflare Tunnel to expose private services to the Internet or to Cloudflare connected private users.",
ArgsUsage: " ",
Description: `Cloudflare Tunnel asks you to specify a hostname on a Cloudflare-powered
domain you control and a local address. Traffic from that hostname is routed
(optionally via a Cloudflare Load Balancer) to this machine and appears on the
specified port where it can be served.
Description: ` Cloudflare Tunnel allows you to expose private services without opening any ingress port on this machine. It can expose:
A) Locally reachable HTTP-based private services to the Internet on DNS with Cloudflare as authority (which you can
then protect with Cloudflare Access).
B) Locally reachable TCP/UDP-based private services to Cloudflare connected private users in the same account, e.g.,
those enrolled in a Zero Trust WARP Client.
This feature requires that your Cloudflare account be subscribed to the Cloudflare Smart Routing feature.
You can manage your Tunnels via dash.teams.cloudflare.com. This approach will only require you to run a single command
later on each machine where you wish to run a Tunnel.
To use, begin by calling login to download a certificate:
Alternatively, you can manage your Tunnels via the command line. Begin by obtaining a certificate to be able to do so:
$ cloudflared tunnel login
$ cloudflared tunnel login
With your certificate installed you can then launch your first tunnel,
replacing my.site.com with a subdomain of your site:
With your certificate installed you can then get started with Tunnels:
$ cloudflared tunnel --hostname my.site.com --url http://localhost:8080
$ cloudflared tunnel create my-first-tunnel
$ cloudflared tunnel route dns my-first-tunnel my-first-tunnel.mydomain.com
$ cloudflared tunnel run --hello-world my-first-tunnel
If you have a web server running on port 8080 (in this example), it will be available on
the internet!`,
You can now access my-first-tunnel.mydomain.com and be served an example page by your local cloudflared process.
For exposing local TCP/UDP services by IP to your privately connected users, check out:
$ cloudflared tunnel route ip --help
See https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/tunnel-guide/ for more info.`,
Subcommands: subcommands,
Flags: tunnelFlags(false),
}
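
The command description above walks through creating, routing, and running a tunnel. If you want to point such a tunnel at something other than `--hello-world`, any locally reachable HTTP listener will do as the origin; a minimal sketch (the port is an arbitrary choice):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
)

func main() {
	http.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		fmt.Fprintln(w, "hello from a private origin behind Cloudflare Tunnel")
	})
	// cloudflared would reach this via an ingress rule or --url http://localhost:8080.
	log.Fatal(http.ListenAndServe("127.0.0.1:8080", nil))
}
```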
@ -208,7 +217,7 @@ func runAdhocNamedTunnel(sc *subcommandContext, name, credentialsOutputPath stri
// runClassicTunnel creates a "classic" non-named tunnel
func runClassicTunnel(sc *subcommandContext) error {
return StartServer(sc.c, buildInfo, nil, sc.log, sc.isUIEnabled)
return StartServer(sc.c, buildInfo, nil, sc.log)
}
func routeFromFlag(c *cli.Context) (route cfapi.HostnameRoute, ok bool) {
@ -226,7 +235,6 @@ func StartServer(
info *cliutil.BuildInfo,
namedTunnel *connection.NamedTunnelProperties,
log *zerolog.Logger,
isUIEnabled bool,
) error {
_ = raven.SetDSN(sentryDSN)
var wg sync.WaitGroup
@ -321,9 +329,9 @@ func StartServer(
return fmt.Errorf(errText)
}
logTransport := logger.CreateTransportLoggerFromContext(c, isUIEnabled)
logTransport := logger.CreateTransportLoggerFromContext(c, logger.EnableTerminalLog)
observer := connection.NewObserver(log, logTransport, isUIEnabled)
observer := connection.NewObserver(log, logTransport)
// Send Quick Tunnel URL to UI if applicable
var quickTunnelURL string
@ -334,11 +342,24 @@ func StartServer(
observer.SendURL(quickTunnelURL)
}
tunnelConfig, dynamicConfig, err := prepareTunnelConfig(c, info, log, logTransport, observer, namedTunnel)
tunnelConfig, orchestratorConfig, err := prepareTunnelConfig(c, info, log, logTransport, observer, namedTunnel)
if err != nil {
log.Err(err).Msg("Couldn't start tunnel")
return err
}
var clientID uuid.UUID
if tunnelConfig.NamedTunnel != nil {
clientID, err = uuid.FromBytes(tunnelConfig.NamedTunnel.Client.ClientID)
if err != nil {
// set to nil for classic tunnels
clientID = uuid.Nil
}
}
orchestrator, err := orchestration.NewOrchestrator(ctx, orchestratorConfig, tunnelConfig.Tags, tunnelConfig.Log)
if err != nil {
return err
}
metricsListener, err := listeners.Listen("tcp", c.String("metrics"))
if err != nil {
@ -349,17 +370,12 @@ func StartServer(
wg.Add(1)
go func() {
defer wg.Done()
readinessServer := metrics.NewReadyServer(log)
readinessServer := metrics.NewReadyServer(log, clientID)
observer.RegisterSink(readinessServer)
errC <- metrics.ServeMetrics(metricsListener, ctx.Done(), readinessServer, quickTunnelURL, log)
errC <- metrics.ServeMetrics(metricsListener, ctx.Done(), readinessServer, quickTunnelURL, orchestrator, log)
}()
orchestrator, err := orchestration.NewOrchestrator(ctx, dynamicConfig, tunnelConfig.Tags, tunnelConfig.Log)
if err != nil {
return err
}
reconnectCh := make(chan supervisor.ReconnectSignal, 1)
reconnectCh := make(chan supervisor.ReconnectSignal, c.Int("ha-connections"))
if c.IsSet("stdin-control") {
log.Info().Msg("Enabling control through stdin")
go stdinControl(reconnectCh, log)
@ -374,18 +390,6 @@ func StartServer(
errC <- supervisor.StartTunnelDaemon(ctx, tunnelConfig, orchestrator, connectedSignal, reconnectCh, graceShutdownC)
}()
if isUIEnabled {
tunnelUI := ui.NewUIModel(
info.Version(),
hostname,
metricsListener.Addr().String(),
dynamicConfig.Ingress,
tunnelConfig.HAConnections,
)
app := tunnelUI.Launch(ctx, log, logTransport)
observer.RegisterSink(app)
}
gracePeriod, err := gracePeriod(c)
if err != nil {
return err
@ -505,6 +509,13 @@ func tunnelFlags(shouldHide bool) []cli.Flag {
Usage: "Cloudflare Edge region to connect to. Omit or set to empty to connect to the global region.",
EnvVars: []string{"TUNNEL_REGION"},
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: "edge-ip-version",
Usage: "Cloudflare Edge ip address version to connect with. {4, 6, auto}",
EnvVars: []string{"TUNNEL_EDGE_IP_VERSION"},
Value: "4",
Hidden: false,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: tlsconfig.CaCertFlag,
Usage: "Certificate Authority authenticating connections with Cloudflare's edge network.",
@ -638,9 +649,9 @@ func tunnelFlags(shouldHide bool) []cli.Flag {
}),
altsrc.NewBoolFlag(&cli.BoolFlag{
Name: uiFlag,
Usage: "Launch tunnel UI. Tunnel logs are scrollable via 'j', 'k', or arrow keys.",
Usage: "(depreciated) Launch tunnel UI. Tunnel logs are scrollable via 'j', 'k', or arrow keys.",
Value: false,
Hidden: shouldHide,
Hidden: true,
}),
altsrc.NewStringFlag(&cli.StringFlag{
Name: "quick-service",
@ -654,6 +665,13 @@ func tunnelFlags(shouldHide bool) []cli.Flag {
EnvVars: []string{"TUNNEL_MAX_FETCH_SIZE"},
Hidden: true,
}),
altsrc.NewBoolFlag(&cli.BoolFlag{
Name: "post-quantum",
Usage: "When given creates an experimental post-quantum secure tunnel",
Aliases: []string{"pq"},
EnvVars: []string{"TUNNEL_POST_QUANTUM"},
Hidden: FipsEnabled,
}),
selectProtocolFlag,
overwriteDNSFlag,
}...)
@ -812,6 +830,13 @@ func configureProxyFlags(shouldHide bool) []cli.Flag {
EnvVars: []string{"TUNNEL_NO_CHUNKED_ENCODING"},
Hidden: shouldHide,
}),
altsrc.NewBoolFlag(&cli.BoolFlag{
Name: ingress.Http2OriginFlag,
Usage: "Enables HTTP/2 origin servers.",
EnvVars: []string{"TUNNEL_ORIGIN_ENABLE_HTTP2"},
Hidden: shouldHide,
Value: false,
}),
}
return append(flags, sshFlags(shouldHide)...)
}

View File

@ -4,6 +4,9 @@ import (
"crypto/tls"
"fmt"
"io/ioutil"
mathRand "math/rand"
"net"
"net/netip"
"os"
"path/filepath"
"strings"
@ -14,9 +17,12 @@ import (
"github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/urfave/cli/v2"
"github.com/urfave/cli/v2/altsrc"
"golang.org/x/crypto/ssh/terminal"
"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
"github.com/cloudflare/cloudflared/edgediscovery/allregions"
"github.com/cloudflare/cloudflared/packet"
"github.com/cloudflare/cloudflared/config"
"github.com/cloudflare/cloudflared/connection"
@ -31,14 +37,19 @@ import (
)
const LogFieldOriginCertPath = "originCertPath"
const secretValue = "*****"
var (
developerPortal = "https://developers.cloudflare.com/argo-tunnel"
quickStartUrl = developerPortal + "/quickstart/quickstart/"
serviceUrl = developerPortal + "/reference/service/"
argumentsUrl = developerPortal + "/reference/arguments/"
LogFieldHostname = "hostname"
secretFlags = [2]*altsrc.StringFlag{credentialsContentsFlag, tunnelTokenFlag}
defaultFeatures = []string{supervisor.FeatureAllowRemoteConfig, supervisor.FeatureSerializedHeaders, supervisor.FeatureDatagramV2}
configFlags = []string{"autoupdate-freq", "no-autoupdate", "retries", "protocol", "loglevel", "transport-loglevel", "origincert", "metrics", "metrics-update-freq", "edge-ip-version"}
)
// returns the first path that contains a cert.pem file. If none of the DefaultConfigSearchDirectories
@ -65,7 +76,11 @@ func generateRandomClientID(log *zerolog.Logger) (string, error) {
func logClientOptions(c *cli.Context, log *zerolog.Logger) {
flags := make(map[string]interface{})
for _, flag := range c.FlagNames() {
flags[flag] = c.Generic(flag)
if isSecretFlag(flag) {
flags[flag] = secretValue
} else {
flags[flag] = c.Generic(flag)
}
}
if len(flags) > 0 {
@ -79,7 +94,11 @@ func logClientOptions(c *cli.Context, log *zerolog.Logger) {
if strings.Contains(env, "TUNNEL_") {
vars := strings.Split(env, "=")
if len(vars) == 2 {
envs[vars[0]] = vars[1]
if isSecretEnvVar(vars[0]) {
envs[vars[0]] = secretValue
} else {
envs[vars[0]] = vars[1]
}
}
}
}
@ -88,6 +107,26 @@ func logClientOptions(c *cli.Context, log *zerolog.Logger) {
}
}
func isSecretFlag(key string) bool {
for _, flag := range secretFlags {
if flag.Name == key {
return true
}
}
return false
}
func isSecretEnvVar(key string) bool {
for _, flag := range secretFlags {
for _, secretEnvVar := range flag.EnvVars {
if secretEnvVar == key {
return true
}
}
}
return false
}
func dnsProxyStandAlone(c *cli.Context, namedTunnel *connection.NamedTunnelProperties) bool {
return c.IsSet("proxy-dns") && (!c.IsSet("hostname") && !c.IsSet("tag") && !c.IsSet("hello-world") && namedTunnel == nil)
}
@ -183,6 +222,23 @@ func prepareTunnelConfig(
ingressRules ingress.Ingress
classicTunnel *connection.ClassicTunnelProperties
)
transportProtocol := c.String("protocol")
needPQ := c.Bool("post-quantum")
if needPQ {
if FipsEnabled {
return nil, nil, fmt.Errorf("post-quantum not supported in FIPS mode")
}
// Error if the user tries to force a non-quic transport protocol
if transportProtocol != connection.AutoSelectFlag && transportProtocol != connection.QUIC.String() {
return nil, nil, fmt.Errorf("post-quantum is only supported with the quic transport")
}
transportProtocol = connection.QUIC.String()
}
protocolFetcher := edgediscovery.ProtocolPercentage
cfg := config.GetConfiguration()
if isNamedTunnel {
clientUUID, err := uuid.NewRandom()
@ -190,7 +246,29 @@ func prepareTunnelConfig(
return nil, nil, errors.Wrap(err, "can't generate connector UUID")
}
log.Info().Msgf("Generated Connector ID: %s", clientUUID)
features := append(c.StringSlice("features"), supervisor.FeatureSerializedHeaders)
features := append(c.StringSlice("features"), defaultFeatures...)
if needPQ {
features = append(features, supervisor.FeaturePostQuantum)
}
if c.IsSet(TunnelTokenFlag) {
if transportProtocol == connection.AutoSelectFlag {
protocolFetcher = func() (edgediscovery.ProtocolPercents, error) {
// If the Tunnel is remotely managed and no protocol is set, we prefer QUIC, but still allow fall-back.
preferQuic := []edgediscovery.ProtocolPercent{
{
Protocol: connection.QUIC.String(),
Percentage: 100,
},
{
Protocol: connection.HTTP2.String(),
Percentage: 100,
},
}
return preferQuic, nil
}
}
log.Info().Msg("Will be fetching remotely managed configuration from Cloudflare API. Defaulting to protocol: quic")
}
namedTunnel.Client = tunnelpogs.ClientInfo{
ClientID: clientUUID[:],
Features: dedup(features),
@ -233,7 +311,7 @@ func prepareTunnelConfig(
}
warpRoutingEnabled := isWarpRoutingEnabled(cfg.WarpRouting, isNamedTunnel)
protocolSelector, err := connection.NewProtocolSelector(c.String("protocol"), warpRoutingEnabled, namedTunnel, edgediscovery.ProtocolPercentage, supervisor.ResolveTTL, log)
protocolSelector, err := connection.NewProtocolSelector(transportProtocol, warpRoutingEnabled, namedTunnel, protocolFetcher, supervisor.ResolveTTL, log, c.Bool("post-quantum"))
if err != nil {
return nil, nil, err
}
@ -267,6 +345,19 @@ func prepareTunnelConfig(
CompressionSetting: h2mux.CompressionSetting(uint64(c.Int("compression-quality"))),
MetricsUpdateFreq: c.Duration("metrics-update-freq"),
}
edgeIPVersion, err := parseConfigIPVersion(c.String("edge-ip-version"))
if err != nil {
return nil, nil, err
}
var pqKexIdx int
if needPQ {
pqKexIdx = mathRand.Intn(len(supervisor.PQKexes))
log.Info().Msgf(
"Using experimental hybrid post-quantum key agreement %s",
supervisor.PQKexNames[supervisor.PQKexes[pqKexIdx]],
)
}
tunnelConfig := &supervisor.TunnelConfig{
GracePeriod: gracePeriod,
@ -275,6 +366,7 @@ func prepareTunnelConfig(
ClientID: clientID,
EdgeAddrs: c.StringSlice("edge"),
Region: c.String("region"),
EdgeIPVersion: edgeIPVersion,
HAConnections: c.Int("ha-connections"),
IncidentLookup: supervisor.NewIncidentLookup(),
IsAutoupdated: c.Bool("is-autoupdated"),
@ -292,12 +384,33 @@ func prepareTunnelConfig(
MuxerConfig: muxerConfig,
ProtocolSelector: protocolSelector,
EdgeTLSConfigs: edgeTLSConfigs,
NeedPQ: needPQ,
PQKexIdx: pqKexIdx,
}
dynamicConfig := &orchestration.Config{
packetConfig, err := newPacketConfig(c, log)
if err != nil {
log.Warn().Err(err).Msg("ICMP proxy feature is disabled")
} else {
tunnelConfig.PacketConfig = packetConfig
}
orchestratorConfig := &orchestration.Config{
Ingress: &ingressRules,
WarpRoutingEnabled: warpRoutingEnabled,
WarpRouting: ingress.NewWarpRoutingConfig(&cfg.WarpRouting),
ConfigurationFlags: parseConfigFlags(c),
}
return tunnelConfig, dynamicConfig, nil
return tunnelConfig, orchestratorConfig, nil
}
func parseConfigFlags(c *cli.Context) map[string]string {
result := make(map[string]string)
for _, flag := range configFlags {
if v := c.String(flag); c.IsSet(flag) && v != "" {
result[flag] = v
}
}
return result
}
func gracePeriod(c *cli.Context) (time.Duration, error) {
@ -334,3 +447,162 @@ func dedup(slice []string) []string {
}
return keys
}
// ParseConfigIPVersion returns the IP version from possible expected values from config
func parseConfigIPVersion(version string) (v allregions.ConfigIPVersion, err error) {
switch version {
case "4":
v = allregions.IPv4Only
case "6":
v = allregions.IPv6Only
case "auto":
v = allregions.Auto
default: // unspecified or invalid
err = fmt.Errorf("invalid value for edge-ip-version: %s", version)
}
return
}
func newPacketConfig(c *cli.Context, logger *zerolog.Logger) (*packet.GlobalRouterConfig, error) {
ipv4Src, err := determineICMPv4Src(c.String("icmpv4-src"), logger)
if err != nil {
return nil, errors.Wrap(err, "failed to determine IPv4 source address for ICMP proxy")
}
logger.Info().Msgf("ICMP proxy will use %s as source for IPv4", ipv4Src)
ipv6Src, zone, err := determineICMPv6Src(c.String("icmpv6-src"), logger, ipv4Src)
if err != nil {
return nil, errors.Wrap(err, "failed to determine IPv6 source address for ICMP proxy")
}
if zone != "" {
logger.Info().Msgf("ICMP proxy will use %s in zone %s as source for IPv6", ipv6Src, zone)
} else {
logger.Info().Msgf("ICMP proxy will use %s as source for IPv6", ipv6Src)
}
icmpRouter, err := ingress.NewICMPRouter(ipv4Src, ipv6Src, zone, logger)
if err != nil {
return nil, err
}
return &packet.GlobalRouterConfig{
ICMPRouter: icmpRouter,
IPv4Src: ipv4Src,
IPv6Src: ipv6Src,
Zone: zone,
}, nil
}
func determineICMPv4Src(userDefinedSrc string, logger *zerolog.Logger) (netip.Addr, error) {
if userDefinedSrc != "" {
addr, err := netip.ParseAddr(userDefinedSrc)
if err != nil {
return netip.Addr{}, err
}
if addr.Is4() {
return addr, nil
}
return netip.Addr{}, fmt.Errorf("expect IPv4, but %s is IPv6", userDefinedSrc)
}
addr, err := findLocalAddr(net.ParseIP("192.168.0.1"), 53)
if err != nil {
addr = netip.IPv4Unspecified()
logger.Debug().Err(err).Msgf("Failed to determine the IPv4 for this machine. It will use %s to send/listen for ICMPv4 echo", addr)
}
return addr, nil
}
type interfaceIP struct {
name string
ip net.IP
}
func determineICMPv6Src(userDefinedSrc string, logger *zerolog.Logger, ipv4Src netip.Addr) (addr netip.Addr, zone string, err error) {
if userDefinedSrc != "" {
userDefinedIP, zone, _ := strings.Cut(userDefinedSrc, "%")
addr, err := netip.ParseAddr(userDefinedIP)
if err != nil {
return netip.Addr{}, "", err
}
if addr.Is6() {
return addr, zone, nil
}
return netip.Addr{}, "", fmt.Errorf("expect IPv6, but %s is IPv4", userDefinedSrc)
}
// Loop through all the interfaces, the preference is
// 1. The interface where ipv4Src is in
// 2. Interface with IPv6 address
// 3. Unspecified interface
interfaces, err := net.Interfaces()
if err != nil {
return netip.IPv6Unspecified(), "", nil
}
interfacesWithIPv6 := make([]interfaceIP, 0)
for _, interf := range interfaces {
interfaceAddrs, err := interf.Addrs()
if err != nil {
continue
}
foundIPv4SrcInterface := false
for _, interfaceAddr := range interfaceAddrs {
if ipnet, ok := interfaceAddr.(*net.IPNet); ok {
ip := ipnet.IP
if ip.Equal(ipv4Src.AsSlice()) {
foundIPv4SrcInterface = true
}
if ip.To4() == nil {
interfacesWithIPv6 = append(interfacesWithIPv6, interfaceIP{
name: interf.Name,
ip: ip,
})
}
}
}
// Found the interface of ipv4Src. Loop through the addresses to see if there is an IPv6
if foundIPv4SrcInterface {
for _, interfaceAddr := range interfaceAddrs {
if ipnet, ok := interfaceAddr.(*net.IPNet); ok {
ip := ipnet.IP
if ip.To4() == nil {
addr, err := netip.ParseAddr(ip.String())
if err == nil {
return addr, interf.Name, nil
}
}
}
}
}
}
for _, interf := range interfacesWithIPv6 {
addr, err := netip.ParseAddr(interf.ip.String())
if err == nil {
return addr, interf.name, nil
}
}
logger.Debug().Err(err).Msgf("Failed to determine the IPv6 for this machine. It will use %s to send/listen for ICMPv6 echo", netip.IPv6Unspecified())
return netip.IPv6Unspecified(), "", nil
}
// FindLocalAddr tries to dial UDP and returns the local address picked by the OS
func findLocalAddr(dst net.IP, port int) (netip.Addr, error) {
udpConn, err := net.DialUDP("udp", nil, &net.UDPAddr{
IP: dst,
Port: port,
})
if err != nil {
return netip.Addr{}, err
}
defer udpConn.Close()
localAddrPort, err := netip.ParseAddrPort(udpConn.LocalAddr().String())
if err != nil {
return netip.Addr{}, err
}
localAddr := localAddrPort.Addr()
return localAddr, nil
}

View File

@ -0,0 +1,3 @@
package tunnel
var FipsEnabled bool

View File

@ -78,7 +78,6 @@ func RunQuickTunnel(sc *subcommandContext) error {
buildInfo,
&connection.NamedTunnelProperties{Credentials: credentials, QuickTunnelUrl: data.Result.Hostname},
sc.log,
sc.isUIEnabled,
)
}

View File

@ -31,10 +31,9 @@ func (e errInvalidJSONCredential) Error() string {
// subcommandContext carries structs shared between subcommands, to reduce number of arguments needed to
// pass between subcommands, and make sure they are only initialized once
type subcommandContext struct {
c *cli.Context
log *zerolog.Logger
isUIEnabled bool
fs fileSystem
c *cli.Context
log *zerolog.Logger
fs fileSystem
// These fields should be accessed using their respective Getter
tunnelstoreClient cfapi.Client
@ -42,16 +41,10 @@ type subcommandContext struct {
}
func newSubcommandContext(c *cli.Context) (*subcommandContext, error) {
isUIEnabled := c.IsSet(uiFlag) && c.String("name") != ""
// If UI is enabled, terminal log output should be disabled -- log should be written into a UI log window instead
log := logger.CreateLoggerFromContext(c, isUIEnabled)
return &subcommandContext{
c: c,
log: log,
isUIEnabled: isUIEnabled,
fs: realFileSystem{},
c: c,
log: logger.CreateLoggerFromContext(c, logger.EnableTerminalLog),
fs: realFileSystem{},
}, nil
}
@ -220,7 +213,6 @@ func (sc *subcommandContext) create(name string, credentialsFilePath string, sec
}
fmt.Println(" Keep this file secret. To revoke these credentials, delete the tunnel.")
fmt.Printf("\nCreated tunnel %s with id %s\n", tunnel.Name, tunnel.ID)
fmt.Printf("\nTunnel Token: %s\n", tunnel.Token)
return &tunnel.Tunnel, nil
}
@ -313,7 +305,6 @@ func (sc *subcommandContext) runWithCredentials(credentials connection.Credentia
buildInfo,
&connection.NamedTunnelProperties{Credentials: credentials},
sc.log,
sc.isUIEnabled,
)
}
@ -342,6 +333,21 @@ func (sc *subcommandContext) cleanupConnections(tunnelIDs []uuid.UUID) error {
return nil
}
func (sc *subcommandContext) getTunnelTokenCredentials(tunnelID uuid.UUID) (*connection.TunnelToken, error) {
client, err := sc.client()
if err != nil {
return nil, err
}
token, err := client.GetTunnelToken(tunnelID)
if err != nil {
sc.log.Err(err).Msgf("Could not get the Token for the given Tunnel %v", tunnelID)
return nil, err
}
return ParseToken(token)
}
func (sc *subcommandContext) route(tunnelID uuid.UUID, r cfapi.HostnameRoute) (cfapi.HostnameRouteResult, error) {
client, err := sc.client()
if err != nil {

View File

@ -35,7 +35,6 @@ func Test_subcommandContext_findCredentials(t *testing.T) {
type fields struct {
c *cli.Context
log *zerolog.Logger
isUIEnabled bool
fs fileSystem
tunnelstoreClient cfapi.Client
userCredential *userCredential
@ -168,7 +167,6 @@ func Test_subcommandContext_findCredentials(t *testing.T) {
sc := &subcommandContext{
c: tt.fields.c,
log: tt.fields.log,
isUIEnabled: tt.fields.isUIEnabled,
fs: tt.fields.fs,
tunnelstoreClient: tt.fields.tunnelstoreClient,
userCredential: tt.fields.userCredential,
@ -216,6 +214,10 @@ func (d *deleteMockTunnelStore) GetTunnel(tunnelID uuid.UUID) (*cfapi.Tunnel, er
return &tunnel.tunnel, nil
}
func (d *deleteMockTunnelStore) GetTunnelToken(tunnelID uuid.UUID) (string, error) {
return "token", nil
}
func (d *deleteMockTunnelStore) DeleteTunnel(tunnelID uuid.UUID) error {
tunnel, ok := d.mockTunnels[tunnelID]
if !ok {
@ -303,7 +305,6 @@ func Test_subcommandContext_Delete(t *testing.T) {
sc := &subcommandContext{
c: tt.fields.c,
log: tt.fields.log,
isUIEnabled: tt.fields.isUIEnabled,
fs: tt.fields.fs,
tunnelstoreClient: tt.fields.tunnelstoreClient,
userCredential: tt.fields.userCredential,

View File

@ -20,7 +20,7 @@ import (
"github.com/urfave/cli/v2"
"github.com/urfave/cli/v2/altsrc"
"golang.org/x/net/idna"
yaml "gopkg.in/yaml.v2"
yaml "gopkg.in/yaml.v3"
"github.com/cloudflare/cloudflared/cfapi"
"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
@ -134,12 +134,19 @@ var (
}
selectProtocolFlag = altsrc.NewStringFlag(&cli.StringFlag{
Name: "protocol",
Value: "auto",
Value: connection.AutoSelectFlag,
Aliases: []string{"p"},
Usage: fmt.Sprintf("Protocol implementation to connect with Cloudflare's edge network. %s", connection.AvailableProtocolFlagMessage),
EnvVars: []string{"TUNNEL_TRANSPORT_PROTOCOL"},
Hidden: true,
})
postQuantumFlag = altsrc.NewBoolFlag(&cli.BoolFlag{
Name: "post-quantum",
Usage: "When given creates an experimental post-quantum secure tunnel",
Aliases: []string{"pq"},
EnvVars: []string{"TUNNEL_POST_QUANTUM"},
Hidden: FipsEnabled,
})
sortInfoByFlag = &cli.StringFlag{
Name: "sort-by",
Value: "createdAt",
@ -169,6 +176,16 @@ var (
Usage: "Base64 encoded secret to set for the tunnel. The decoded secret must be at least 32 bytes long. If not specified, a random 32-byte secret will be generated.",
EnvVars: []string{"TUNNEL_CREATE_SECRET"},
}
icmpv4SrcFlag = &cli.StringFlag{
Name: "icmpv4-src",
Usage: "Source address to send/receive ICMPv4 messages. If not provided cloudflared will dial a local address to determine the source IP or fallback to 0.0.0.0.",
EnvVars: []string{"TUNNEL_ICMPV4_SRC"},
}
icmpv6SrcFlag = &cli.StringFlag{
Name: "icmpv6-src",
Usage: "Source address and the interface name to send/receive ICMPv6 messages. If not provided cloudflared will dial a local address to determine the source IP or fallback to ::.",
EnvVars: []string{"TUNNEL_ICMPV6_SRC"},
}
)
func buildCreateCommand() *cli.Command {
@ -602,9 +619,12 @@ func buildRunCommand() *cli.Command {
forceFlag,
credentialsFileFlag,
credentialsContentsFlag,
postQuantumFlag,
selectProtocolFlag,
featuresFlag,
tunnelTokenFlag,
icmpv4SrcFlag,
icmpv6SrcFlag,
}
flags = append(flags, configureProxyFlags(false)...)
return &cli.Command{
@ -644,7 +664,7 @@ func runCommand(c *cli.Context) error {
// Check if token is provided and if not use default tunnelID flag method
if tokenStr := c.String(TunnelTokenFlag); tokenStr != "" {
if token, err := parseToken(tokenStr); err == nil {
if token, err := ParseToken(tokenStr); err == nil {
return sc.runWithCredentials(token.Credentials())
}
@ -663,7 +683,7 @@ func runCommand(c *cli.Context) error {
}
}
func parseToken(tokenStr string) (*connection.TunnelToken, error) {
func ParseToken(tokenStr string) (*connection.TunnelToken, error) {
content, err := base64.StdEncoding.DecodeString(tokenStr)
if err != nil {
return nil, err
@ -714,6 +734,59 @@ func cleanupCommand(c *cli.Context) error {
return sc.cleanupConnections(tunnelIDs)
}
func buildTokenCommand() *cli.Command {
return &cli.Command{
Name: "token",
Action: cliutil.ConfiguredAction(tokenCommand),
Usage: "Fetch the credentials token for an existing tunnel (by name or UUID) that allows to run it",
UsageText: "cloudflared tunnel [tunnel command options] token [subcommand options] TUNNEL",
Description: "cloudflared tunnel token will fetch the credentials token for a given tunnel (by its name or UUID), which is then used to run the tunnel. This command fails if the tunnel does not exist or has been deleted. Use the flag `cloudflared tunnel token --cred-file /my/path/file.json TUNNEL` to output the token to the credentials JSON file. Note: this command only works for Tunnels created since cloudflared version 2022.3.0",
Flags: []cli.Flag{credentialsFileFlagCLIOnly},
CustomHelpTemplate: commandHelpTemplate(),
}
}
func tokenCommand(c *cli.Context) error {
sc, err := newSubcommandContext(c)
if err != nil {
return errors.Wrap(err, "error setting up logger")
}
warningChecker := updater.StartWarningCheck(c)
defer warningChecker.LogWarningIfAny(sc.log)
if c.NArg() != 1 {
return cliutil.UsageError(`"cloudflared tunnel token" requires exactly 1 argument, the name or UUID of the tunnel to fetch the credentials token for.`)
}
tunnelID, err := sc.findID(c.Args().First())
if err != nil {
return errors.Wrap(err, "error parsing tunnel ID")
}
token, err := sc.getTunnelTokenCredentials(tunnelID)
if err != nil {
return err
}
if path := c.String(CredFileFlag); path != "" {
credentials := token.Credentials()
err := writeTunnelCredentials(path, &credentials)
if err != nil {
return errors.Wrapf(err, "error writing token credentials to JSON file in path %s", path)
}
return nil
}
encodedToken, err := token.Encode()
if err != nil {
return err
}
fmt.Printf("%s", encodedToken)
return nil
}
func buildRouteCommand() *cli.Command {
return &cli.Command{
Name: "route",
@ -748,7 +821,7 @@ Further information about managing Cloudflare WARP traffic to your tunnel is ava
Name: "lb",
Action: cliutil.ConfiguredAction(routeLbCommand),
Usage: "Use this tunnel as a load balancer origin, creating pool and load balancer if necessary",
UsageText: "cloudflared tunnel route dns [TUNNEL] [HOSTNAME] [LB-POOL]",
UsageText: "cloudflared tunnel route lb [TUNNEL] [HOSTNAME] [LB-POOL]",
Description: `Creates Load Balancer with an origin pool that points to the tunnel.`,
},
buildRouteIPSubcommand(),

View File

@ -183,7 +183,7 @@ func Test_validateHostname(t *testing.T) {
}
func Test_TunnelToken(t *testing.T) {
token, err := parseToken("aabc")
token, err := ParseToken("aabc")
require.Error(t, err)
require.Nil(t, token)
@ -198,7 +198,7 @@ func Test_TunnelToken(t *testing.T) {
token64 := base64.StdEncoding.EncodeToString(tokenJsonStr)
token, err = parseToken(token64)
token, err = ParseToken(token64)
require.NoError(t, err)
require.Equal(t, token, expectedToken)
}
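
For context on what `ParseToken` accepts in this test: as far as the diff shows, the token is a base64-encoded JSON document (the same string `cloudflared tunnel token` prints and `GetTunnelToken` returns), and parsing is a base64 decode followed by a JSON unmarshal. A standalone round-trip sketch; the struct and its JSON tags mirror my reading of `connection.TunnelToken` and should be treated as an assumption:

```go
package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
	"log"
)

// Illustrative stand-in for connection.TunnelToken; the JSON tags are an
// assumption made for this sketch, not taken from the diff.
type tunnelToken struct {
	AccountTag   string `json:"a"`
	TunnelSecret []byte `json:"s"`
	TunnelID     string `json:"t"`
}

func main() {
	// Encode a sample payload the way the token is transported...
	sample := tunnelToken{AccountTag: "account-tag", TunnelSecret: []byte("secret"), TunnelID: "tunnel-uuid"}
	raw, err := json.Marshal(sample)
	if err != nil {
		log.Fatal(err)
	}
	encoded := base64.StdEncoding.EncodeToString(raw)

	// ...and decode it again, which is essentially the ParseToken round trip.
	decoded, err := base64.StdEncoding.DecodeString(encoded)
	if err != nil {
		log.Fatal(err)
	}
	var tok tunnelToken
	if err := json.Unmarshal(decoded, &tok); err != nil {
		log.Fatal(err)
	}
	fmt.Println(tok.TunnelID)
}
```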

View File

@ -1,223 +0,0 @@
package ui
import (
"context"
"fmt"
"strings"
"github.com/gdamore/tcell"
"github.com/rivo/tview"
"github.com/rs/zerolog"
"github.com/cloudflare/cloudflared/connection"
"github.com/cloudflare/cloudflared/ingress"
)
type connState struct {
location string
}
type uiModel struct {
version string
edgeURL string
metricsURL string
localServices []string
connections []connState
}
type palette struct {
url string
connected string
defaultText string
disconnected string
reconnecting string
unregistered string
}
func NewUIModel(version, hostname, metricsURL string, ing *ingress.Ingress, haConnections int) *uiModel {
localServices := make([]string, len(ing.Rules))
for i, rule := range ing.Rules {
localServices[i] = rule.Service.String()
}
return &uiModel{
version: version,
edgeURL: hostname,
metricsURL: metricsURL,
localServices: localServices,
connections: make([]connState, haConnections),
}
}
func (data *uiModel) Launch(
ctx context.Context,
log, transportLog *zerolog.Logger,
) connection.EventSink {
// Configure the logger to stream logs into the textview
// Add TextView as a group to write output to
logTextView := NewDynamicColorTextView()
// TODO: Format log for UI
//log.Add(logTextView, logger.NewUIFormatter(time.RFC3339), logLevels...)
//transportLog.Add(logTextView, logger.NewUIFormatter(time.RFC3339), logLevels...)
// Construct the UI
palette := palette{
url: "lightblue",
connected: "lime",
defaultText: "white",
disconnected: "red",
reconnecting: "orange",
unregistered: "orange",
}
app := tview.NewApplication()
grid := tview.NewGrid().SetGap(1, 0)
frame := tview.NewFrame(grid)
header := fmt.Sprintf("cloudflared [::b]%s", data.version)
frame.AddText(header, true, tview.AlignLeft, tcell.ColorWhite)
// Create table to store connection info and status
connTable := tview.NewTable()
// SetColumns takes a value for each column, representing the size of the column
// Numbers <= 0 represent proportional widths and positive numbers represent absolute widths
grid.SetColumns(20, 0)
// SetRows takes a value for each row, representing the size of the row
grid.SetRows(1, 1, len(data.connections), 1, 0)
// AddItem takes a primitive tview type, row, column, rowSpan, columnSpan, minGridHeight, minGridWidth, and focus
grid.AddItem(tview.NewTextView().SetText("Tunnel:"), 0, 0, 1, 1, 0, 0, false)
grid.AddItem(tview.NewTextView().SetText("Status:"), 1, 0, 1, 1, 0, 0, false)
grid.AddItem(tview.NewTextView().SetText("Connections:"), 2, 0, 1, 1, 0, 0, false)
grid.AddItem(tview.NewTextView().SetText("Metrics:"), 3, 0, 1, 1, 0, 0, false)
tunnelHostText := tview.NewTextView().SetText(data.edgeURL)
grid.AddItem(tunnelHostText, 0, 1, 1, 1, 0, 0, false)
status := fmt.Sprintf("[%s]\u2022[%s] Proxying to [%s::b]%s", palette.connected, palette.defaultText, palette.url, strings.Join(data.localServices, ", "))
grid.AddItem(NewDynamicColorTextView().SetText(status), 1, 1, 1, 1, 0, 0, false)
grid.AddItem(connTable, 2, 1, 1, 1, 0, 0, false)
grid.AddItem(NewDynamicColorTextView().SetText(fmt.Sprintf("Metrics at [%s::b]http://%s/metrics", palette.url, data.metricsURL)), 3, 1, 1, 1, 0, 0, false)
// Add TextView to stream logs
// Logs are displayed in a new grid so a border can be set around them
logGrid := tview.NewGrid().SetBorders(true).AddItem(logTextView.SetChangedFunc(handleNewText(app, logTextView)), 0, 0, 5, 2, 0, 0, false)
// LogFrame holds the Logs header as well as the grid with the textView for streamed logs
logFrame := tview.NewFrame(logGrid).AddText("[::b]Logs:[::-]", true, tview.AlignLeft, tcell.ColorWhite).SetBorders(0, 0, 0, 0, 0, 0)
// Footer for log frame
logFrame.AddText("[::d]Use Ctrl+C to exit[::-]", false, tview.AlignRight, tcell.ColorWhite)
grid.AddItem(logFrame, 4, 0, 5, 2, 0, 0, false)
go func() {
<-ctx.Done()
app.Stop()
return
}()
go func() {
if err := app.SetRoot(frame, true).Run(); err != nil {
log.Error().Msgf("Error launching UI: %s", err)
}
}()
return connection.EventSinkFunc(func(event connection.Event) {
switch event.EventType {
case connection.Connected:
data.setConnTableCell(event, connTable, palette)
case connection.Disconnected, connection.Reconnecting, connection.Unregistering:
data.changeConnStatus(event, connTable, log, palette)
case connection.SetURL:
tunnelHostText.SetText(event.URL)
data.edgeURL = event.URL
case connection.RegisteringTunnel:
if data.edgeURL == "" {
tunnelHostText.SetText(fmt.Sprintf("Registering tunnel connection %d...", event.Index))
}
}
app.Draw()
})
}
func NewDynamicColorTextView() *tview.TextView {
return tview.NewTextView().SetDynamicColors(true)
}
// Re-draws application when new logs are streamed to UI
func handleNewText(app *tview.Application, logTextView *tview.TextView) func() {
return func() {
app.Draw()
// SetFocus to enable scrolling in textview
app.SetFocus(logTextView)
}
}
func (data *uiModel) changeConnStatus(event connection.Event, table *tview.Table, log *zerolog.Logger, palette palette) {
index := int(event.Index)
// Get connection location and state
connState := data.getConnState(index)
// Check if connection is already displayed in UI
if connState == nil {
log.Info().Msg("Connection is not in the UI table")
return
}
locationState := event.Location
if event.EventType == connection.Reconnecting {
locationState = "Reconnecting..."
}
connectionNum := index + 1
// Get table cell
cell := table.GetCell(index, 0)
// Change dot color in front of text as well as location state
text := newCellText(palette, connectionNum, locationState, event.EventType)
cell.SetText(text)
}
// Return connection location and row in UI table
func (data *uiModel) getConnState(connID int) *connState {
if connID < len(data.connections) {
return &data.connections[connID]
}
return nil
}
func (data *uiModel) setConnTableCell(event connection.Event, table *tview.Table, palette palette) {
index := int(event.Index)
connectionNum := index + 1
// Update slice to keep track of connection location and state in UI table
data.connections[index].location = event.Location
// Update text in table cell to show disconnected state
text := newCellText(palette, connectionNum, event.Location, event.EventType)
cell := tview.NewTableCell(text)
table.SetCell(index, 0, cell)
}
func newCellText(palette palette, connectionNum int, location string, connectedStatus connection.Status) string {
// HA connection indicator formatted as: "• #<CONNECTION_INDEX>: <COLO>",
// where the left middle dot's color depends on the status of the connection
const connFmtString = "[%s]\u2022[%s] #%d: %s"
var dotColor string
switch connectedStatus {
case connection.Connected:
dotColor = palette.connected
case connection.Disconnected:
dotColor = palette.disconnected
case connection.Reconnecting:
dotColor = palette.reconnecting
case connection.Unregistering:
dotColor = palette.unregistered
}
return fmt.Sprintf(connFmtString, dotColor, palette.defaultText, connectionNum, location)
}

View File

@ -30,7 +30,8 @@ const (
)
var (
version string
version string
BuiltForPackageManager = ""
)
// BinaryUpdated implements ExitCoder interface, the app will exit with status code 11
@ -118,7 +119,11 @@ func Update(c *cli.Context) error {
log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
if wasInstalledFromPackageManager() {
log.Error().Msg("cloudflared was installed by a package manager. Please update using the same method.")
packageManagerName := "a package manager"
if BuiltForPackageManager != "" {
packageManagerName = BuiltForPackageManager
}
log.Error().Msg(fmt.Sprintf("cloudflared was installed by %s. Please update using the same method.", packageManagerName))
return nil
}
@ -281,7 +286,7 @@ func supportAutoUpdate(log *zerolog.Logger) bool {
func wasInstalledFromPackageManager() bool {
ok, _ := config.FileExists(filepath.Join(config.DefaultUnixConfigLocation, isManagedInstallFile))
return ok
return len(BuiltForPackageManager) != 0 || ok
}
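BuiltForPackageManager stays an empty string unless a packaging build stamps it, so a single link-time value both disables self-update and names the package manager in the log message above. A minimal sketch of that pattern, assuming the variable is injected with the Go linker's -X flag; the import path ("main" here) and the "homebrew" value are illustrative, not taken from this diff:

```go
package main

import "fmt"

// Normally stamped at build time, e.g.:
//   go build -ldflags "-X main.BuiltForPackageManager=homebrew"
// In cloudflared the variable lives in the updater package; "main" is used
// here only to keep the sketch self-contained.
var BuiltForPackageManager = ""

// wasInstalledFromPackageManager mirrors the logic above: either the build
// was branded for a package manager, or the managed-install marker exists.
func wasInstalledFromPackageManager(markerFileExists bool) bool {
	return len(BuiltForPackageManager) != 0 || markerFileExists
}

func main() {
	if wasInstalledFromPackageManager(false) {
		name := "a package manager"
		if BuiltForPackageManager != "" {
			name = BuiltForPackageManager
		}
		fmt.Printf("cloudflared was installed by %s. Please update using the same method.\n", name)
		return
	}
	fmt.Println("self-update is allowed")
}
```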
func isRunningFromTerminal() bool {

View File

@ -26,8 +26,8 @@ import (
const (
windowsServiceName = "Cloudflared"
windowsServiceDescription = "Cloudflare Tunnel agent"
windowsServiceUrl = "https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/run-tunnel/run-as-service#windows"
windowsServiceDescription = "Cloudflared agent"
windowsServiceUrl = "https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/run-tunnel/as-a-service/windows/"
recoverActionDelay = time.Second * 20
failureCountResetPeriod = time.Hour * 24
@ -46,16 +46,16 @@ const (
func runApp(app *cli.App, graceShutdownC chan struct{}) {
app.Commands = append(app.Commands, &cli.Command{
Name: "service",
Usage: "Manages the Cloudflare Tunnel Windows service",
Usage: "Manages the cloudflared Windows service",
Subcommands: []*cli.Command{
{
Name: "install",
Usage: "Install Cloudflare Tunnel as a Windows service",
Usage: "Install cloudflared as a Windows service",
Action: cliutil.ConfiguredAction(installWindowsService),
},
{
Name: "uninstall",
Usage: "Uninstall the Cloudflare Tunnel service",
Usage: "Uninstall the cloudflared service",
Action: cliutil.ConfiguredAction(uninstallWindowsService),
},
},
@ -177,7 +177,7 @@ func (s *windowsService) Execute(serviceArgs []string, r <-chan svc.ChangeReques
func installWindowsService(c *cli.Context) error {
zeroLogger := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
zeroLogger.Info().Msg("Installing Cloudflare Tunnel Windows service")
zeroLogger.Info().Msg("Installing cloudflared Windows service")
exepath, err := os.Executable()
if err != nil {
return errors.Wrap(err, "Cannot find path name that start the process")
@ -191,26 +191,39 @@ func installWindowsService(c *cli.Context) error {
log := zeroLogger.With().Str(LogFieldWindowsServiceName, windowsServiceName).Logger()
if err == nil {
s.Close()
return fmt.Errorf("Service %s already exists", windowsServiceName)
return fmt.Errorf(serviceAlreadyExistsWarn(windowsServiceName))
}
extraArgs, err := getServiceExtraArgsFromCliArgs(c, &log)
if err != nil {
errMsg := "Unable to determine extra arguments for windows service"
log.Err(err).Msg(errMsg)
return errors.Wrap(err, errMsg)
}
config := mgr.Config{StartType: mgr.StartAutomatic, DisplayName: windowsServiceDescription}
s, err = m.CreateService(windowsServiceName, exepath, config)
s, err = m.CreateService(windowsServiceName, exepath, config, extraArgs...)
if err != nil {
return errors.Wrap(err, "Cannot install service")
}
defer s.Close()
log.Info().Msg("Cloudflare Tunnel agent service is installed")
log.Info().Msg("cloudflared agent service is installed")
err = eventlog.InstallAsEventCreate(windowsServiceName, eventlog.Error|eventlog.Warning|eventlog.Info)
if err != nil {
s.Delete()
return errors.Wrap(err, "Cannot install event logger")
}
err = configRecoveryOption(s.Handle)
if err != nil {
log.Err(err).Msg("Cannot set service recovery actions")
log.Info().Msgf("See %s to manually configure service recovery actions", windowsServiceUrl)
}
return nil
err = s.Start()
if err == nil {
log.Info().Msg("Agent service for cloudflared installed successfully")
}
return err
}
func uninstallWindowsService(c *cli.Context) error {
@ -218,7 +231,7 @@ func uninstallWindowsService(c *cli.Context) error {
With().
Str(LogFieldWindowsServiceName, windowsServiceName).Logger()
log.Info().Msg("Uninstalling Cloudflare Tunnel Windows Service")
log.Info().Msg("Uninstalling cloudflared agent service")
m, err := mgr.Connect()
if err != nil {
return errors.Wrap(err, "Cannot establish a connection to the service control manager")
@ -226,14 +239,22 @@ func uninstallWindowsService(c *cli.Context) error {
defer m.Disconnect()
s, err := m.OpenService(windowsServiceName)
if err != nil {
return fmt.Errorf("Service %s is not installed", windowsServiceName)
return fmt.Errorf("Agent service %s is not installed, so it could not be uninstalled", windowsServiceName)
}
defer s.Close()
if status, err := s.Query(); err == nil && status.State == svc.Running {
log.Info().Msg("Stopping cloudflared agent service")
if _, err := s.Control(svc.Stop); err != nil {
log.Info().Err(err).Msg("Failed to stop cloudflared agent service, you may need to stop it manually to complete uninstall.")
}
}
err = s.Delete()
if err != nil {
return errors.Wrap(err, "Cannot delete service")
return errors.Wrap(err, "Cannot delete agent service")
}
log.Info().Msg("Cloudflare Tunnel agent service is uninstalled")
log.Info().Msg("Agent service for cloudflared was uninstalled successfully")
err = eventlog.Remove(windowsServiceName)
if err != nil {
return errors.Wrap(err, "Cannot remove event logger")

View File

@ -14,7 +14,7 @@ classic_hostname: "classic-tunnel-component-tests.example.com"
origincert: "/Users/tunnel/.cloudflared/cert.pem"
ingress:
- hostname: named-tunnel-component-tests.example.com
service: http_status:200
service: hello_world
- service: http_status:404
```

95
component-tests/cli.py Normal file
View File

@ -0,0 +1,95 @@
import json
import subprocess
from time import sleep
from setup import get_config_from_file
SINGLE_CASE_TIMEOUT = 600
class CloudflaredCli:
def __init__(self, config, config_path, logger):
self.basecmd = [config.cloudflared_binary, "tunnel"]
if config_path is not None:
self.basecmd += ["--config", str(config_path)]
origincert = get_config_from_file()["origincert"]
if origincert:
self.basecmd += ["--origincert", origincert]
self.logger = logger
def _run_command(self, subcmd, subcmd_name, needs_to_pass=True):
cmd = self.basecmd + subcmd
# timeout limits the time a subprocess can run. This is useful to guard against running a tunnel when the
# command/args are in the wrong order.
result = run_subprocess(cmd, subcmd_name, self.logger, check=needs_to_pass, capture_output=True, timeout=15)
return result
def list_tunnels(self):
cmd_args = ["list", "--output", "json"]
listed = self._run_command(cmd_args, "list")
return json.loads(listed.stdout)
def get_tunnel_info(self, tunnel_id):
info = self._run_command(["info", "--output", "json", tunnel_id], "info")
return json.loads(info.stdout)
def __enter__(self):
self.basecmd += ["run"]
self.process = subprocess.Popen(self.basecmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
self.logger.info(f"Run cmd {self.basecmd}")
return self.process
def __exit__(self, exc_type, exc_value, exc_traceback):
terminate_gracefully(self.process, self.logger, self.basecmd)
self.logger.debug(f"{self.basecmd} logs: {self.process.stderr.read()}")
def terminate_gracefully(process, logger, cmd):
process.terminate()
process_terminated = wait_for_terminate(process)
if not process_terminated:
process.kill()
logger.warning(f"{cmd}: cloudflared did not terminate within wait period. Killing process. logs: \
stdout: {process.stdout.read()}, stderr: {process.stderr.read()}")
def wait_for_terminate(opened_subprocess, attempts=10, poll_interval=1):
"""
wait_for_terminate polls the opened_subprocess every x seconds for a given number of attempts.
It returns true if the subprocess was terminated and false if it was not.
"""
for _ in range(attempts):
if _is_process_stopped(opened_subprocess):
return True
sleep(poll_interval)
return False
def _is_process_stopped(process):
return process.poll() is not None
def cert_path():
return get_config_from_file()["origincert"]
class SubprocessError(Exception):
def __init__(self, program, exit_code, cause):
self.program = program
self.exit_code = exit_code
self.cause = cause
def run_subprocess(cmd, cmd_name, logger, timeout=SINGLE_CASE_TIMEOUT, **kargs):
kargs["timeout"] = timeout
try:
result = subprocess.run(cmd, **kargs)
logger.debug(f"{cmd} log: {result.stdout}", extra={"cmd": cmd_name})
return result
except subprocess.CalledProcessError as e:
err = f"{cmd} return exit code {e.returncode}, stderr" + e.stderr.decode("utf-8")
logger.error(err, extra={"cmd": cmd_name, "return_code": e.returncode})
raise SubprocessError(cmd[0], e.returncode, e)
except subprocess.TimeoutExpired as e:
err = f"{cmd} timeout after {e.timeout} seconds, stdout: {e.stdout}, stderr: {e.stderr}"
logger.error(err, extra={"cmd": cmd_name, "return_code": "timeout"})
raise e

View File

@ -1,5 +1,7 @@
#!/usr/bin/env python
import copy
import json
import base64
from dataclasses import dataclass, InitVar
@ -61,6 +63,28 @@ class NamedTunnelConfig(NamedTunnelBaseConfig):
def get_url(self):
return "https://" + self.ingress[0]['hostname']
def base_config(self):
config = self.full_config.copy()
# removes the tunnel reference
del(config["tunnel"])
del(config["credentials-file"])
return config
def get_tunnel_id(self):
return self.full_config["tunnel"]
def get_token(self):
creds = self.get_credentials_json()
token_dict = {"a": creds["AccountTag"], "t": creds["TunnelID"], "s": creds["TunnelSecret"]}
token_json_str = json.dumps(token_dict)
return base64.b64encode(token_json_str.encode('utf-8'))
def get_credentials_json(self):
with open(self.credentials_file) as json_file:
return json.load(json_file)
@dataclass(frozen=True)
class ClassicTunnelBaseConfig(BaseConfig):

View File

@ -1,6 +1,7 @@
METRICS_PORT = 51000
MAX_RETRIES = 5
BACKOFF_SECS = 7
MAX_LOG_LINES = 50
PROXY_DNS_PORT = 9053

View File

@ -0,0 +1,165 @@
import ipaddress
import socket
import pytest
from constants import protocols
from cli import CloudflaredCli
from util import get_tunnel_connector_id, LOGGER, wait_tunnel_ready, write_config
class TestEdgeDiscovery:
def _extra_config(self, protocol, edge_ip_version):
config = {
"protocol": protocol,
}
if edge_ip_version:
config["edge-ip-version"] = edge_ip_version
return config
@pytest.mark.parametrize("protocol", protocols())
def test_default_only(self, tmp_path, component_tests_config, protocol):
"""
This test runs a tunnel to connect via IPv4-only edge addresses (the default, with "--edge-ip-version" unset, is 4)
"""
if self.has_ipv6_only():
pytest.skip("Host has IPv6 only support and current default is IPv4 only")
self.expect_address_connections(
tmp_path, component_tests_config, protocol, None, self.expect_ipv4_address)
@pytest.mark.parametrize("protocol", protocols())
def test_ipv4_only(self, tmp_path, component_tests_config, protocol):
"""
This test runs a tunnel to connect via IPv4-only edge addresses
"""
if self.has_ipv6_only():
pytest.skip("Host has IPv6 only support")
self.expect_address_connections(
tmp_path, component_tests_config, protocol, "4", self.expect_ipv4_address)
@pytest.mark.parametrize("protocol", protocols())
def test_ipv6_only(self, tmp_path, component_tests_config, protocol):
"""
This test runs a tunnel to connect via IPv6-only edge addresses
"""
if self.has_ipv4_only():
pytest.skip("Host has IPv4 only support")
self.expect_address_connections(
tmp_path, component_tests_config, protocol, "6", self.expect_ipv6_address)
@pytest.mark.parametrize("protocol", protocols())
def test_auto_ip64(self, tmp_path, component_tests_config, protocol):
"""
This test runs a tunnel to connect via auto with a preference of IPv6 then IPv4 addresses for a dual stack host
This test also assumes that the host has IPv6 preference.
"""
if not self.has_dual_stack(address_family_preference=socket.AddressFamily.AF_INET6):
pytest.skip("Host does not support dual stack with IPv6 preference")
self.expect_address_connections(
tmp_path, component_tests_config, protocol, "auto", self.expect_ipv6_address)
@pytest.mark.parametrize("protocol", protocols())
def test_auto_ip46(self, tmp_path, component_tests_config, protocol):
"""
This test runs a tunnel to connect via auto with a preference of IPv4 then IPv6 addresses for a dual stack host
This test also assumes that the host has IPv4 preference.
"""
if not self.has_dual_stack(address_family_preference=socket.AddressFamily.AF_INET):
pytest.skip("Host does not support dual stack with IPv4 preference")
self.expect_address_connections(
tmp_path, component_tests_config, protocol, "auto", self.expect_ipv4_address)
def expect_address_connections(self, tmp_path, component_tests_config, protocol, edge_ip_version, assert_address_type):
config = component_tests_config(
self._extra_config(protocol, edge_ip_version))
config_path = write_config(tmp_path, config.full_config)
LOGGER.debug(config)
with CloudflaredCli(config, config_path, LOGGER):
wait_tunnel_ready(tunnel_url=config.get_url(),
require_min_connections=4)
cfd_cli = CloudflaredCli(config, config_path, LOGGER)
tunnel_id = config.get_tunnel_id()
info = cfd_cli.get_tunnel_info(tunnel_id)
connector_id = get_tunnel_connector_id()
connector = next(
(c for c in info["conns"] if c["id"] == connector_id), None)
assert connector, f"Expected connection info from get tunnel info for the connected instance: {info}"
conns = connector["conns"]
assert conns == None or len(
conns) == 4, f"There should be 4 connections registered: {conns}"
for conn in conns:
origin_ip = conn["origin_ip"]
assert origin_ip, f"No available origin_ip for this connection: {conn}"
assert_address_type(origin_ip)
def expect_ipv4_address(self, address):
assert type(ipaddress.ip_address(
address)) is ipaddress.IPv4Address, f"Expected connection from origin to be a valid IPv4 address: {address}"
def expect_ipv6_address(self, address):
assert type(ipaddress.ip_address(
address)) is ipaddress.IPv6Address, f"Expected connection from origin to be a valid IPv6 address: {address}"
def get_addresses(self):
"""
Returns a list of addresses for the host.
"""
host_addresses = socket.getaddrinfo(
"region1.v2.argotunnel.com", 7844, socket.AF_UNSPEC, socket.SOCK_STREAM)
assert len(
host_addresses) > 0, "No addresses returned from getaddrinfo"
return host_addresses
def has_dual_stack(self, address_family_preference=None):
"""
Returns true if the host has dual stack support and can optionally check
the provided IP family preference.
"""
dual_stack = not self.has_ipv6_only() and not self.has_ipv4_only()
if address_family_preference:
address = self.get_addresses()[0]
return dual_stack and address[0] == address_family_preference
return dual_stack
def has_ipv6_only(self):
"""
Returns True if the host has only IPv6 address support.
"""
return self.attempt_connection(socket.AddressFamily.AF_INET6) and not self.attempt_connection(socket.AddressFamily.AF_INET)
def has_ipv4_only(self):
"""
Returns True if the host has only IPv4 address support.
"""
return self.attempt_connection(socket.AddressFamily.AF_INET) and not self.attempt_connection(socket.AddressFamily.AF_INET6)
def attempt_connection(self, address_family):
"""
Returns True if a successful socket connection can be made to the
remote host with the provided address family to validate host support
for the provided address family.
"""
address = None
for a in self.get_addresses():
if a[0] == address_family:
address = a
break
if address is None:
# Couldn't even lookup the address family so we can't connect
return False
af, socktype, proto, canonname, sockaddr = address
s = None
try:
s = socket.socket(af, socktype, proto)
except OSError:
return False
try:
s.connect(sockaddr)
except OSError:
s.close()
return False
s.close()
return True

View File

@ -2,6 +2,7 @@
import json
import os
from constants import MAX_LOG_LINES
from util import start_cloudflared, wait_tunnel_ready, send_requests
# Rolling logger rotate log files after 1 MB
@ -11,14 +12,24 @@ expect_message = "Starting Hello"
def assert_log_to_terminal(cloudflared):
stderr = cloudflared.stderr.read(1500)
assert expect_message.encode() in stderr, f"{stderr} doesn't contain {expect_message}"
for _ in range(0, MAX_LOG_LINES):
line = cloudflared.stderr.readline()
if not line:
break
if expect_message.encode() in line:
return
raise Exception(f"terminal log doesn't contain {expect_message}")
def assert_log_in_file(file):
with open(file, "r") as f:
log = f.read(2000)
assert expect_message in log, f"{log} doesn't contain {expect_message}"
for _ in range(0, MAX_LOG_LINES):
line = f.readline()
if not line:
break
if expect_message in line:
return
raise Exception(f"log file doesn't contain {expect_message}")
def assert_json_log(file):

View File

@ -0,0 +1,17 @@
from util import LOGGER, nofips, start_cloudflared, wait_tunnel_ready
@nofips
class TestPostQuantum:
def _extra_config(self):
config = {
"protocol": "quic",
}
return config
def test_post_quantum(self, tmp_path, component_tests_config):
config = component_tests_config(self._extra_config())
LOGGER.debug(config)
with start_cloudflared(tmp_path, config, cfd_args=["run", "--post-quantum"], new_process=True):
wait_tunnel_ready(tunnel_url=config.get_url(),
require_min_connections=4)

View File

@ -47,17 +47,12 @@ class TestReconnect:
cloudflared.stdin.flush()
def assert_reconnect(self, config, cloudflared, repeat):
wait_tunnel_ready(tunnel_url=config.get_url(), require_min_connections=self.default_ha_conns)
wait_tunnel_ready(tunnel_url=config.get_url(),
require_min_connections=self.default_ha_conns)
for _ in range(repeat):
for i in range(self.default_ha_conns):
for _ in range(self.default_ha_conns):
self.send_reconnect(cloudflared, self.default_reconnect_secs)
expect_connections = self.default_ha_conns-i-1
if expect_connections > 0:
# Don't check if tunnel returns 200 here because there is a race condition between wait_tunnel_ready
# retrying to get 200 response and reconnecting
wait_tunnel_ready(require_min_connections=expect_connections)
else:
check_tunnel_not_connected()
check_tunnel_not_connected()
sleep(self.default_reconnect_secs * 2)
wait_tunnel_ready(tunnel_url=config.get_url(), require_min_connections=self.default_ha_conns)
wait_tunnel_ready(tunnel_url=config.get_url(),
require_min_connections=self.default_ha_conns)

View File

@ -1,6 +1,6 @@
#!/usr/bin/env python
import os
import platform
import pathlib
import subprocess
from contextlib import contextmanager
from pathlib import Path
@ -9,12 +9,7 @@ import pytest
import test_logging
from conftest import CfdModes
from util import start_cloudflared, wait_tunnel_ready
def select_platform(plat):
return pytest.mark.skipif(
platform.system() != plat, reason=f"Only runs on {plat}")
from util import select_platform, start_cloudflared, wait_tunnel_ready, write_config
def default_config_dir():
@ -43,6 +38,21 @@ class TestServiceMode:
self.launchd_service_scenario(config, assert_log_file)
@select_platform("Darwin")
@pytest.mark.skipif(os.path.exists(default_config_file()), reason=f"There is already a config file in default path")
def test_launchd_service_with_token(self, tmp_path, component_tests_config):
log_file = tmp_path / test_logging.default_log_file
additional_config = {
"logfile": str(log_file),
}
config = component_tests_config(additional_config=additional_config)
# service install doesn't install the config file but in this case we want to use some default settings
# so we write the base config without the tunnel credentials and ID
write_config(pathlib.Path(default_config_dir()), config.base_config())
self.launchd_service_scenario(config, use_token=True)
@select_platform("Darwin")
@pytest.mark.skipif(os.path.exists(default_config_file()), reason=f"There is already a config file in default path")
def test_launchd_service_rotating_log(self, tmp_path, component_tests_config):
@ -60,12 +70,13 @@ class TestServiceMode:
self.launchd_service_scenario(config, assert_rotating_log)
def launchd_service_scenario(self, config, extra_assertions):
with self.run_service(Path(default_config_dir()), config):
def launchd_service_scenario(self, config, extra_assertions=None, use_token=False):
with self.run_service(Path(default_config_dir()), config, use_token=use_token):
self.launchctl_cmd("list")
self.launchctl_cmd("start")
wait_tunnel_ready(tunnel_url=config.get_url())
extra_assertions()
if extra_assertions is not None:
extra_assertions()
self.launchctl_cmd("stop")
os.remove(default_config_file())
@ -105,27 +116,48 @@ class TestServiceMode:
self.sysv_service_scenario(config, tmp_path, assert_rotating_log)
def sysv_service_scenario(self, config, tmp_path, extra_assertions):
with self.run_service(tmp_path, config, root=True):
self.sysv_cmd("start")
@select_platform("Linux")
@pytest.mark.skipif(os.path.exists("/etc/cloudflared/config.yml"),
reason=f"There is already a config file in default path")
def test_sysv_service_with_token(self, tmp_path, component_tests_config):
additional_config = {
"loglevel": "debug",
}
config = component_tests_config(additional_config=additional_config)
# service install doesn't install the config file but in this case we want to use some default settings
# so we write the base config without the tunnel credentials and ID
config_path = write_config(tmp_path, config.base_config())
subprocess.run(["sudo", "cp", config_path, "/etc/cloudflared/config.yml"], check=True)
self.sysv_service_scenario(config, tmp_path, use_token=True)
def sysv_service_scenario(self, config, tmp_path, extra_assertions=None, use_token=False):
with self.run_service(tmp_path, config, root=True, use_token=use_token):
self.sysv_cmd("status")
wait_tunnel_ready(tunnel_url=config.get_url())
extra_assertions()
self.sysv_cmd("stop")
if extra_assertions is not None:
extra_assertions()
# Service install copies config file to /etc/cloudflared/config.yml
subprocess.run(["sudo", "rm", "/etc/cloudflared/config.yml"])
self.sysv_cmd("status", success=False)
@contextmanager
def run_service(self, tmp_path, config, root=False):
def run_service(self, tmp_path, config, root=False, use_token=False):
args = ["service", "install"]
if use_token:
args.append(config.get_token())
try:
service = start_cloudflared(
tmp_path, config, cfd_args=["service", "install"], cfd_pre_args=[], capture_output=False, root=root)
tmp_path, config, cfd_args=args, cfd_pre_args=[], capture_output=False, root=root, skip_config_flag=use_token)
yield service
finally:
start_cloudflared(
tmp_path, config, cfd_args=["service", "uninstall"], cfd_pre_args=[], capture_output=False, root=root)
tmp_path, config, cfd_args=["service", "uninstall"], cfd_pre_args=[], capture_output=False, root=root, skip_config_flag=use_token)
def launchctl_cmd(self, action, success=True):
cmd = subprocess.run(

View File

@ -0,0 +1,35 @@
import base64
import json
from setup import get_config_from_file, persist_origin_cert
from util import start_cloudflared
class TestToken:
def test_get_token(self, tmp_path, component_tests_config):
config = component_tests_config()
tunnel_id = config.get_tunnel_id()
token_args = ["--origincert", cert_path(), "token", tunnel_id]
output = start_cloudflared(tmp_path, config, token_args)
assert parse_token(config.get_token()) == parse_token(output.stdout)
def test_get_credentials_file(self, tmp_path, component_tests_config):
config = component_tests_config()
tunnel_id = config.get_tunnel_id()
cred_file = tmp_path / "test_get_credentials_file.json"
token_args = ["--origincert", cert_path(), "token", "--cred-file", cred_file, tunnel_id]
start_cloudflared(tmp_path, config, token_args)
with open(cred_file) as json_file:
assert config.get_credentials_json() == json.load(json_file)
def cert_path():
return get_config_from_file()["origincert"]
def parse_token(token):
return json.loads(base64.b64decode(token))

View File

@ -1,9 +1,12 @@
import logging
import os
import platform
import subprocess
from contextlib import contextmanager
from time import sleep
import pytest
import requests
import yaml
from retrying import retry
@ -13,6 +16,17 @@ from constants import METRICS_PORT, MAX_RETRIES, BACKOFF_SECS
LOGGER = logging.getLogger(__name__)
def select_platform(plat):
return pytest.mark.skipif(
platform.system() != plat, reason=f"Only runs on {plat}")
def fips_enabled():
env_fips = os.getenv("COMPONENT_TESTS_FIPS")
return env_fips is not None and env_fips != "0"
nofips = pytest.mark.skipif(
fips_enabled(), reason=f"Only runs without FIPS (COMPONENT_TESTS_FIPS=0)")
def write_config(directory, config):
config_path = directory / "config.yml"
with open(config_path, 'w') as outfile:
@ -21,9 +35,14 @@ def write_config(directory, config):
def start_cloudflared(directory, config, cfd_args=["run"], cfd_pre_args=["tunnel"], new_process=False,
allow_input=False, capture_output=True, root=False):
config_path = write_config(directory, config.full_config)
allow_input=False, capture_output=True, root=False, skip_config_flag=False):
config_path = None
if not skip_config_flag:
config_path = write_config(directory, config.full_config)
cmd = cloudflared_cmd(config, config_path, cfd_args, cfd_pre_args, root)
if new_process:
return run_cloudflared_background(cmd, allow_input, capture_output)
# By setting check=True, it will raise an exception if the process exits with non-zero exit code
@ -36,7 +55,10 @@ def cloudflared_cmd(config, config_path, cfd_args, cfd_pre_args, root):
cmd += ["sudo"]
cmd += [config.cloudflared_binary]
cmd += cfd_pre_args
cmd += ["--config", str(config_path)]
if config_path is not None:
cmd += ["--config", str(config_path)]
cmd += cfd_args
LOGGER.info(f"Run cmd {cmd} with config {config}")
return cmd
@ -46,13 +68,15 @@ def cloudflared_cmd(config, config_path, cfd_args, cfd_pre_args, root):
def run_cloudflared_background(cmd, allow_input, capture_output):
output = subprocess.PIPE if capture_output else subprocess.DEVNULL
stdin = subprocess.PIPE if allow_input else None
cfd = None
try:
cfd = subprocess.Popen(cmd, stdin=stdin, stdout=output, stderr=output)
yield cfd
finally:
cfd.terminate()
if capture_output:
LOGGER.info(f"cloudflared log: {cfd.stderr.read()}")
if cfd:
cfd.terminate()
if capture_output:
LOGGER.info(f"cloudflared log: {cfd.stderr.read()}")
def wait_tunnel_ready(tunnel_url=None, require_min_connections=1, cfd_logs=None):
@ -91,18 +115,31 @@ def _log_cloudflared_logs(cfd_logs):
LOGGER.warning(line)
@retry(stop_max_attempt_number=MAX_RETRIES * BACKOFF_SECS, wait_fixed=1000)
@retry(stop_max_attempt_number=MAX_RETRIES, wait_fixed=BACKOFF_SECS * 1000)
def check_tunnel_not_connected():
url = f'http://localhost:{METRICS_PORT}/ready'
try:
resp = requests.get(url, timeout=1)
resp = requests.get(url, timeout=BACKOFF_SECS)
assert resp.status_code == 503, f"Expect {url} returns 503, got {resp.status_code}"
assert resp.json()[
"readyConnections"] == 0, "Expected all connections to be terminated (pending reconnect)"
# cloudflared might have already terminated
except requests.exceptions.ConnectionError as e:
LOGGER.warning(f"Failed to connect to {url}, error: {e}")
def get_tunnel_connector_id():
url = f'http://localhost:{METRICS_PORT}/ready'
try:
resp = requests.get(url, timeout=1)
return resp.json()["connectorId"]
# cloudflared might have already terminated
except requests.exceptions.ConnectionError as e:
LOGGER.warning(f"Failed to connect to {url}, error: {e}")
# In some cases we don't need to check response status, such as when sending batch requests to generate logs
def send_requests(url, count, require_ok=True):
errors = 0

View File

@ -1,19 +1,21 @@
package config
import (
"encoding/json"
"fmt"
"io"
"net/url"
"os"
"path/filepath"
"runtime"
"strconv"
"time"
homedir "github.com/mitchellh/go-homedir"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"github.com/urfave/cli/v2"
yaml "gopkg.in/yaml.v2"
yaml "gopkg.in/yaml.v3"
"github.com/cloudflare/cloudflared/validation"
)
@ -49,7 +51,7 @@ func DefaultConfigDirectory() string {
path := os.Getenv("CFDPATH")
if path == "" {
path = filepath.Join(os.Getenv("ProgramFiles(x86)"), "cloudflared")
if _, err := os.Stat(path); os.IsNotExist(err) { //doesn't exist, so return an empty failure string
if _, err := os.Stat(path); os.IsNotExist(err) { // doesn't exist, so return an empty failure string
return ""
}
}
@ -138,7 +140,7 @@ func FindOrCreateConfigPath() string {
defer file.Close()
logDir := DefaultLogDirectory()
_ = os.MkdirAll(logDir, os.ModePerm) //try and create it. Doesn't matter if it succeed or not, only byproduct will be no logs
_ = os.MkdirAll(logDir, os.ModePerm) // try and create it. Doesn't matter if it succeeds or not; the only byproduct will be no logs
c := Root{
LogDirectory: logDir,
@ -175,9 +177,9 @@ func ValidateUrl(c *cli.Context, allowURLFromArgs bool) (*url.URL, error) {
}
type UnvalidatedIngressRule struct {
Hostname string `json:"hostname"`
Path string `json:"path"`
Service string `json:"service"`
Hostname string `json:"hostname,omitempty"`
Path string `json:"path,omitempty"`
Service string `json:"service,omitempty"`
OriginRequest OriginRequestConfig `yaml:"originRequest" json:"originRequest"`
}
@ -190,41 +192,56 @@ type UnvalidatedIngressRule struct {
// - To specify a time.Duration in json, use int64 of the nanoseconds
type OriginRequestConfig struct {
// HTTP proxy timeout for establishing a new connection
ConnectTimeout *time.Duration `yaml:"connectTimeout" json:"connectTimeout"`
ConnectTimeout *CustomDuration `yaml:"connectTimeout" json:"connectTimeout,omitempty"`
// HTTP proxy timeout for completing a TLS handshake
TLSTimeout *time.Duration `yaml:"tlsTimeout" json:"tlsTimeout"`
TLSTimeout *CustomDuration `yaml:"tlsTimeout" json:"tlsTimeout,omitempty"`
// HTTP proxy TCP keepalive duration
TCPKeepAlive *time.Duration `yaml:"tcpKeepAlive" json:"tcpKeepAlive"`
TCPKeepAlive *CustomDuration `yaml:"tcpKeepAlive" json:"tcpKeepAlive,omitempty"`
// HTTP proxy should disable "happy eyeballs" for IPv4/v6 fallback
NoHappyEyeballs *bool `yaml:"noHappyEyeballs" json:"noHappyEyeballs"`
NoHappyEyeballs *bool `yaml:"noHappyEyeballs" json:"noHappyEyeballs,omitempty"`
// HTTP proxy maximum keepalive connection pool size
KeepAliveConnections *int `yaml:"keepAliveConnections" json:"keepAliveConnections"`
KeepAliveConnections *int `yaml:"keepAliveConnections" json:"keepAliveConnections,omitempty"`
// HTTP proxy timeout for closing an idle connection
KeepAliveTimeout *time.Duration `yaml:"keepAliveTimeout" json:"keepAliveTimeout"`
KeepAliveTimeout *CustomDuration `yaml:"keepAliveTimeout" json:"keepAliveTimeout,omitempty"`
// Sets the HTTP Host header for the local webserver.
HTTPHostHeader *string `yaml:"httpHostHeader" json:"httpHostHeader"`
HTTPHostHeader *string `yaml:"httpHostHeader" json:"httpHostHeader,omitempty"`
// Hostname on the origin server certificate.
OriginServerName *string `yaml:"originServerName" json:"originServerName"`
OriginServerName *string `yaml:"originServerName" json:"originServerName,omitempty"`
// Path to the CA for the certificate of your origin.
// This option should be used only if your certificate is not signed by Cloudflare.
CAPool *string `yaml:"caPool" json:"caPool"`
CAPool *string `yaml:"caPool" json:"caPool,omitempty"`
// Disables TLS verification of the certificate presented by your origin.
// Will allow any certificate from the origin to be accepted.
// Note: The connection from your machine to Cloudflare's Edge is still encrypted.
NoTLSVerify *bool `yaml:"noTLSVerify" json:"noTLSVerify"`
NoTLSVerify *bool `yaml:"noTLSVerify" json:"noTLSVerify,omitempty"`
// Disables chunked transfer encoding.
// Useful if you are running a WSGI server.
DisableChunkedEncoding *bool `yaml:"disableChunkedEncoding" json:"disableChunkedEncoding"`
DisableChunkedEncoding *bool `yaml:"disableChunkedEncoding" json:"disableChunkedEncoding,omitempty"`
// Runs as jump host
BastionMode *bool `yaml:"bastionMode" json:"bastionMode"`
BastionMode *bool `yaml:"bastionMode" json:"bastionMode,omitempty"`
// Listen address for the proxy.
ProxyAddress *string `yaml:"proxyAddress" json:"proxyAddress"`
ProxyAddress *string `yaml:"proxyAddress" json:"proxyAddress,omitempty"`
// Listen port for the proxy.
ProxyPort *uint `yaml:"proxyPort" json:"proxyPort"`
ProxyPort *uint `yaml:"proxyPort" json:"proxyPort,omitempty"`
// Valid options are 'socks' or empty.
ProxyType *string `yaml:"proxyType" json:"proxyType"`
ProxyType *string `yaml:"proxyType" json:"proxyType,omitempty"`
// IP rules for the proxy service
IPRules []IngressIPRule `yaml:"ipRules" json:"ipRules"`
IPRules []IngressIPRule `yaml:"ipRules" json:"ipRules,omitempty"`
// Attempt to connect to origin with HTTP/2
Http2Origin *bool `yaml:"http2Origin" json:"http2Origin,omitempty"`
// Access holds all access related configs
Access *AccessConfig `yaml:"access" json:"access,omitempty"`
}
type AccessConfig struct {
// Required, when set to true, will fail every request that does not arrive through an Access-authenticated endpoint.
Required bool `yaml:"required" json:"required,omitempty"`
// TeamName is the organization team name to get the public key certificates for.
TeamName string `yaml:"teamName" json:"teamName"`
// AudTag is the list of AudTags to verify the access JWT against.
AudTag []string `yaml:"audTag" json:"audTag"`
}
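To make the shape of the new access block concrete, here is a small sketch that marshals an originRequest carrying it; the mirror types keep only the fields needed here (JSON tags follow the diff), and the team name and AudTag values are placeholders, not anything from this change:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Local mirrors of the config types above so the sketch compiles on its own.
type accessConfig struct {
	Required bool     `json:"required,omitempty"`
	TeamName string   `json:"teamName"`
	AudTag   []string `json:"audTag"`
}

type originRequestConfig struct {
	Http2Origin *bool         `json:"http2Origin,omitempty"`
	Access      *accessConfig `json:"access,omitempty"`
}

func main() {
	http2Origin := true
	cfg := originRequestConfig{
		Http2Origin: &http2Origin,
		Access: &accessConfig{
			Required: true,
			TeamName: "example-team",              // placeholder
			AudTag:   []string{"example-aud-tag"}, // placeholder
		},
	}
	out, _ := json.MarshalIndent(cfg, "", "  ")
	fmt.Println(string(out))
}
```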
type IngressIPRule struct {
@ -242,7 +259,9 @@ type Configuration struct {
}
type WarpRoutingConfig struct {
Enabled bool `yaml:"enabled" json:"enabled"`
Enabled bool `yaml:"enabled" json:"enabled"`
ConnectTimeout *CustomDuration `yaml:"connectTimeout" json:"connectTimeout,omitempty"`
TCPKeepAlive *CustomDuration `yaml:"tcpKeepAlive" json:"tcpKeepAlive,omitempty"`
}
type configFileSettings struct {
@ -390,7 +409,7 @@ func ReadConfigFile(c *cli.Context, log *zerolog.Logger) (settings *configFileSe
// Parse it again, with strict mode, to find warnings.
if file, err := os.Open(configFile); err == nil {
decoder := yaml.NewDecoder(file)
decoder.SetStrict(true)
decoder.KnownFields(true)
var unusedConfig configFileSettings
if err := decoder.Decode(&unusedConfig); err != nil {
warnings = err.Error()
@ -399,3 +418,34 @@ func ReadConfigFile(c *cli.Context, log *zerolog.Logger) (settings *configFileSe
return &configuration, warnings, nil
}
// A CustomDuration is a Duration that has custom serialization for JSON.
// JSON in JavaScript assumes that int fields are 32 bits, and Duration fields are deserialized assuming the numbers
// are in nanoseconds, which with 32-bit integers caps a duration at roughly 2 seconds.
// This type instead assumes that the number is in seconds when serializing/deserializing to/from JSON, while it keeps
// the usual YAML serialization behavior.
type CustomDuration struct {
time.Duration
}
func (s CustomDuration) MarshalJSON() ([]byte, error) {
return json.Marshal(s.Duration.Seconds())
}
func (s *CustomDuration) UnmarshalJSON(data []byte) error {
seconds, err := strconv.ParseInt(string(data), 10, 64)
if err != nil {
return err
}
s.Duration = time.Duration(seconds * int64(time.Second))
return nil
}
func (s *CustomDuration) MarshalYAML() (interface{}, error) {
return s.Duration.String(), nil
}
func (s *CustomDuration) UnmarshalYAML(unmarshal func(interface{}) error) error {
return unmarshal(&s.Duration)
}
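The doc comment above is the whole point of CustomDuration: over JSON the wire value is a plain number of seconds, while YAML keeps Go's duration strings (the warp-routing test below writes "2s" and "10s"). A standalone round-trip sketch of the JSON side, using a local mirror of the type:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strconv"
	"time"
)

// customDuration mirrors the type above: JSON carries whole seconds,
// not nanoseconds.
type customDuration struct {
	time.Duration
}

func (s customDuration) MarshalJSON() ([]byte, error) {
	return json.Marshal(s.Duration.Seconds())
}

func (s *customDuration) UnmarshalJSON(data []byte) error {
	seconds, err := strconv.ParseInt(string(data), 10, 64)
	if err != nil {
		return err
	}
	s.Duration = time.Duration(seconds * int64(time.Second))
	return nil
}

func main() {
	// "10" on the wire means 10 seconds, not the 10ns a naive
	// time.Duration decode would produce.
	var d customDuration
	if err := json.Unmarshal([]byte(`10`), &d); err != nil {
		panic(err)
	}
	fmt.Println(d.Duration) // 10s

	out, _ := json.Marshal(customDuration{Duration: 30 * time.Second})
	fmt.Println(string(out)) // 30
}
```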

View File

@ -6,7 +6,8 @@ import (
"time"
"github.com/stretchr/testify/assert"
yaml "gopkg.in/yaml.v2"
"github.com/stretchr/testify/require"
yaml "gopkg.in/yaml.v3"
)
func TestConfigFileSettings(t *testing.T) {
@ -22,7 +23,9 @@ func TestConfigFileSettings(t *testing.T) {
Service: "https://localhost:8001",
}
warpRouting = WarpRoutingConfig{
Enabled: true,
Enabled: true,
ConnectTimeout: &CustomDuration{Duration: 2 * time.Second},
TCPKeepAlive: &CustomDuration{Duration: 10 * time.Second},
}
)
rawYAML := `
@ -47,6 +50,9 @@ ingress:
service: https://localhost:8001
warp-routing:
enabled: true
connectTimeout: 2s
tcpKeepAlive: 10s
retries: 5
grace-period: 30s
percentage: 3.14
@ -110,14 +116,13 @@ counters:
}
func TestUnmarshalOriginRequestConfig(t *testing.T) {
raw := []byte(`
var rawJsonConfig = []byte(`
{
"connectTimeout": 10000000000,
"tlsTimeout": 30000000000,
"tcpKeepAlive": 30000000000,
"connectTimeout": 10,
"tlsTimeout": 30,
"tcpKeepAlive": 30,
"noHappyEyeballs": true,
"keepAliveTimeout": 60000000000,
"keepAliveTimeout": 60,
"keepAliveConnections": 10,
"httpHostHeader": "app.tunnel.com",
"originServerName": "app.tunnel.com",
@ -139,16 +144,43 @@ func TestUnmarshalOriginRequestConfig(t *testing.T) {
"ports": [443, 4443],
"allow": true
}
]
],
"http2Origin": true
}
`)
func TestMarshalUnmarshalOriginRequest(t *testing.T) {
testCases := []struct {
name string
marshalFunc func(in interface{}) (out []byte, err error)
unMarshalFunc func(in []byte, out interface{}) (err error)
}{
{"json", json.Marshal, json.Unmarshal},
{"yaml", yaml.Marshal, yaml.Unmarshal},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
assertConfig(t, tc.marshalFunc, tc.unMarshalFunc)
})
}
}
func assertConfig(
t *testing.T,
marshalFunc func(in interface{}) (out []byte, err error),
unMarshalFunc func(in []byte, out interface{}) (err error),
) {
var config OriginRequestConfig
assert.NoError(t, json.Unmarshal(raw, &config))
assert.Equal(t, time.Second*10, *config.ConnectTimeout)
assert.Equal(t, time.Second*30, *config.TLSTimeout)
assert.Equal(t, time.Second*30, *config.TCPKeepAlive)
var config2 OriginRequestConfig
assert.NoError(t, json.Unmarshal(rawJsonConfig, &config))
assert.Equal(t, time.Second*10, config.ConnectTimeout.Duration)
assert.Equal(t, time.Second*30, config.TLSTimeout.Duration)
assert.Equal(t, time.Second*30, config.TCPKeepAlive.Duration)
assert.Equal(t, true, *config.NoHappyEyeballs)
assert.Equal(t, time.Second*60, *config.KeepAliveTimeout)
assert.Equal(t, time.Second*60, config.KeepAliveTimeout.Duration)
assert.Equal(t, 10, *config.KeepAliveConnections)
assert.Equal(t, "app.tunnel.com", *config.HTTPHostHeader)
assert.Equal(t, "app.tunnel.com", *config.OriginServerName)
@ -160,6 +192,7 @@ func TestUnmarshalOriginRequestConfig(t *testing.T) {
assert.Equal(t, true, *config.NoTLSVerify)
assert.Equal(t, uint(9000), *config.ProxyPort)
assert.Equal(t, "socks", *config.ProxyType)
assert.Equal(t, true, *config.Http2Origin)
privateV4 := "10.0.0.0/8"
privateV6 := "fc00::/7"
@ -176,4 +209,12 @@ func TestUnmarshalOriginRequestConfig(t *testing.T) {
},
}
assert.Equal(t, ipRules, config.IPRules)
// validate that serializing and deserializing again matches the deserialization from raw string
result, err := marshalFunc(config)
require.NoError(t, err)
err = unMarshalFunc(result, &config2)
require.NoError(t, err)
require.Equal(t, config2, config)
}

View File

@ -6,7 +6,7 @@ import (
"github.com/pkg/errors"
"github.com/rs/zerolog"
yaml "gopkg.in/yaml.v2"
yaml "gopkg.in/yaml.v3"
"github.com/cloudflare/cloudflared/watcher"
)

View File

@ -2,6 +2,7 @@ package connection
import (
"context"
"encoding/base64"
"fmt"
"io"
"math"
@ -11,7 +12,9 @@ import (
"time"
"github.com/google/uuid"
"github.com/pkg/errors"
"github.com/cloudflare/cloudflared/tracing"
"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
"github.com/cloudflare/cloudflared/websocket"
)
@ -21,13 +24,22 @@ const (
LogFieldConnIndex = "connIndex"
MaxGracePeriod = time.Minute * 3
MaxConcurrentStreams = math.MaxUint32
contentTypeHeader = "content-type"
sseContentType = "text/event-stream"
grpcContentType = "application/grpc"
)
var switchingProtocolText = fmt.Sprintf("%d %s", http.StatusSwitchingProtocols, http.StatusText(http.StatusSwitchingProtocols))
var (
switchingProtocolText = fmt.Sprintf("%d %s", http.StatusSwitchingProtocols, http.StatusText(http.StatusSwitchingProtocols))
flushableContentTypes = []string{sseContentType, grpcContentType}
)
type Orchestrator interface {
UpdateConfig(version int32, config []byte) *pogs.UpdateConfigurationResponse
GetConfigJSON() ([]byte, error)
GetOriginProxy() (OriginProxy, error)
WarpRoutingEnabled() (enabled bool)
}
type NamedTunnelProperties struct {
@ -65,6 +77,15 @@ func (t TunnelToken) Credentials() Credentials {
}
}
func (t TunnelToken) Encode() (string, error) {
val, err := json.Marshal(t)
if err != nil {
return "", errors.Wrap(err, "could not JSON encode token")
}
return base64.StdEncoding.EncodeToString(val), nil
}
type ClassicTunnelProperties struct {
Hostname string
OriginCert []byte
@ -80,6 +101,7 @@ const (
TypeTCP
TypeControlStream
TypeHTTP
TypeConfiguration
)
// ShouldFlush returns whether this kind of connection should actively flush data
@ -109,22 +131,24 @@ func (t Type) String() string {
// OriginProxy is how data flows from cloudflared to the origin services running behind it.
type OriginProxy interface {
ProxyHTTP(w ResponseWriter, req *http.Request, isWebsocket bool) error
ProxyHTTP(w ResponseWriter, tr *tracing.TracedHTTPRequest, isWebsocket bool) error
ProxyTCP(ctx context.Context, rwa ReadWriteAcker, req *TCPRequest) error
}
// TCPRequest defines the input format needed to perform a TCP proxy.
type TCPRequest struct {
Dest string
CFRay string
LBProbe bool
Dest string
CFRay string
LBProbe bool
FlowID string
CfTraceID string
}
// ReadWriteAcker is a readwriter with the ability to Acknowledge to the downstream (edge) that the origin has
// accepted the connection.
type ReadWriteAcker interface {
io.ReadWriter
AckConnection() error
AckConnection(tracePropagation string) error
}
// HTTPResponseReadWriteAcker is an HTTP implementation of ReadWriteAcker.
@ -153,22 +177,28 @@ func (h *HTTPResponseReadWriteAcker) Write(p []byte) (int, error) {
// AckConnection acks an HTTP connection by sending a switch protocols status code that enables the caller to
// upgrade to streams.
func (h *HTTPResponseReadWriteAcker) AckConnection() error {
func (h *HTTPResponseReadWriteAcker) AckConnection(tracePropagation string) error {
resp := &http.Response{
Status: switchingProtocolText,
StatusCode: http.StatusSwitchingProtocols,
ContentLength: -1,
Header: http.Header{},
}
if secWebsocketKey := h.req.Header.Get("Sec-WebSocket-Key"); secWebsocketKey != "" {
resp.Header = websocket.NewResponseHeader(h.req)
}
if tracePropagation != "" {
resp.Header.Add(tracing.CanonicalCloudflaredTracingHeader, tracePropagation)
}
return h.w.WriteRespHeaders(resp.StatusCode, resp.Header)
}
type ResponseWriter interface {
WriteRespHeaders(status int, header http.Header) error
AddTrailer(trailerName, trailerValue string)
io.Writer
}
@ -177,10 +207,18 @@ type ConnectedFuse interface {
IsConnected() bool
}
func IsServerSentEvent(headers http.Header) bool {
if contentType := headers.Get("content-type"); contentType != "" {
return strings.HasPrefix(strings.ToLower(contentType), "text/event-stream")
// Helper method to let the caller know what content-types should require a flush on every
// write to a ResponseWriter.
func shouldFlush(headers http.Header) bool {
if contentType := headers.Get(contentTypeHeader); contentType != "" {
contentType = strings.ToLower(contentType)
for _, c := range flushableContentTypes {
if strings.HasPrefix(contentType, c) {
return true
}
}
}
return false
}

View File

@ -6,12 +6,11 @@ import (
"io"
"math/rand"
"net/http"
"testing"
"time"
"github.com/rs/zerolog"
"github.com/stretchr/testify/assert"
"github.com/cloudflare/cloudflared/tracing"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
"github.com/cloudflare/cloudflared/websocket"
)
@ -29,6 +28,8 @@ var (
testLargeResp = make([]byte, largeFileSize)
)
var _ ReadWriteAcker = (*HTTPResponseReadWriteAcker)(nil)
type testRequest struct {
name string
endpoint string
@ -41,6 +42,10 @@ type mockOrchestrator struct {
originProxy OriginProxy
}
func (mcr *mockOrchestrator) GetConfigJSON() ([]byte, error) {
return nil, fmt.Errorf("not implemented")
}
func (*mockOrchestrator) UpdateConfig(version int32, config []byte) *tunnelpogs.UpdateConfigurationResponse {
return &tunnelpogs.UpdateConfigurationResponse{
LastAppliedVersion: version,
@ -51,13 +56,18 @@ func (mcr *mockOrchestrator) GetOriginProxy() (OriginProxy, error) {
return mcr.originProxy, nil
}
func (mcr *mockOrchestrator) WarpRoutingEnabled() (enabled bool) {
return true
}
type mockOriginProxy struct{}
func (moc *mockOriginProxy) ProxyHTTP(
w ResponseWriter,
req *http.Request,
tr *tracing.TracedHTTPRequest,
isWebsocket bool,
) error {
req := tr.Request
if isWebsocket {
switch req.URL.Path {
case "/ws/echo":
@ -189,40 +199,3 @@ func (mcf mockConnectedFuse) Connected() {}
func (mcf mockConnectedFuse) IsConnected() bool {
return true
}
func TestIsEventStream(t *testing.T) {
tests := []struct {
headers http.Header
isEventStream bool
}{
{
headers: newHeader("Content-Type", "text/event-stream"),
isEventStream: true,
},
{
headers: newHeader("content-type", "text/event-stream"),
isEventStream: true,
},
{
headers: newHeader("Content-Type", "text/event-stream; charset=utf-8"),
isEventStream: true,
},
{
headers: newHeader("Content-Type", "application/json"),
isEventStream: false,
},
{
headers: http.Header{},
isEventStream: false,
},
}
for _, test := range tests {
assert.Equal(t, test.isEventStream, IsServerSentEvent(test.headers))
}
}
func newHeader(key, value string) http.Header {
header := http.Header{}
header.Add(key, value)
return header
}

View File

@ -2,7 +2,9 @@ package connection
import (
"context"
"fmt"
"io"
"net"
"time"
"github.com/rs/zerolog"
@ -19,6 +21,8 @@ type controlStream struct {
connectedFuse ConnectedFuse
namedTunnelProperties *NamedTunnelProperties
connIndex uint8
edgeAddress net.IP
protocol Protocol
newRPCClientFunc RPCClientFunc
@ -30,20 +34,26 @@ type controlStream struct {
// ControlStreamHandler registers connections with origintunneld and initiates graceful shutdown.
type ControlStreamHandler interface {
// ServeControlStream handles the control plane of the transport in the current goroutine calling this
ServeControlStream(ctx context.Context, rw io.ReadWriteCloser, connOptions *tunnelpogs.ConnectionOptions) error
ServeControlStream(ctx context.Context, rw io.ReadWriteCloser, connOptions *tunnelpogs.ConnectionOptions, tunnelConfigGetter TunnelConfigJSONGetter) error
// IsStopped tells whether the method above has finished
IsStopped() bool
}
type TunnelConfigJSONGetter interface {
GetConfigJSON() ([]byte, error)
}
// NewControlStream returns a new instance of ControlStreamHandler
func NewControlStream(
observer *Observer,
connectedFuse ConnectedFuse,
namedTunnelConfig *NamedTunnelProperties,
connIndex uint8,
edgeAddress net.IP,
newRPCClientFunc RPCClientFunc,
gracefulShutdownC <-chan struct{},
gracePeriod time.Duration,
protocol Protocol,
) ControlStreamHandler {
if newRPCClientFunc == nil {
newRPCClientFunc = newRegistrationRPCClient
@ -54,8 +64,10 @@ func NewControlStream(
namedTunnelProperties: namedTunnelConfig,
newRPCClientFunc: newRPCClientFunc,
connIndex: connIndex,
edgeAddress: edgeAddress,
gracefulShutdownC: gracefulShutdownC,
gracePeriod: gracePeriod,
protocol: protocol,
}
}
@ -63,15 +75,31 @@ func (c *controlStream) ServeControlStream(
ctx context.Context,
rw io.ReadWriteCloser,
connOptions *tunnelpogs.ConnectionOptions,
tunnelConfigGetter TunnelConfigJSONGetter,
) error {
rpcClient := c.newRPCClientFunc(ctx, rw, c.observer.log)
if err := rpcClient.RegisterConnection(ctx, c.namedTunnelProperties, connOptions, c.connIndex, c.observer); err != nil {
registrationDetails, err := rpcClient.RegisterConnection(ctx, c.namedTunnelProperties, connOptions, c.connIndex, c.edgeAddress, c.observer)
if err != nil {
rpcClient.Close()
return err
}
c.observer.logServerInfo(c.connIndex, registrationDetails.Location, c.edgeAddress, fmt.Sprintf("Connection %s registered", registrationDetails.UUID))
c.observer.sendConnectedEvent(c.connIndex, c.protocol, registrationDetails.Location)
c.connectedFuse.Connected()
// if conn index is 0 and tunnel is not remotely managed, then send local ingress rules configuration
if c.connIndex == 0 && !registrationDetails.TunnelIsRemotelyManaged {
if tunnelConfig, err := tunnelConfigGetter.GetConfigJSON(); err == nil {
if err := rpcClient.SendLocalConfiguration(ctx, tunnelConfig, c.observer); err != nil {
c.observer.log.Err(err).Msg("unable to send local configuration")
}
} else {
c.observer.log.Err(err).Msg("failed to obtain current configuration")
}
}
c.waitForUnregister(ctx, rpcClient)
return nil
}

View File

@ -18,6 +18,19 @@ func (e DupConnRegisterTunnelError) Error() string {
return "already connected to this server, trying another address"
}
// Dial to edge server with quic failed
type EdgeQuicDialError struct {
Cause error
}
func (e *EdgeQuicDialError) Error() string {
return "failed to dial to edge with quic: " + e.Cause.Error()
}
func (e *EdgeQuicDialError) Unwrap() error {
return e.Cause
}
// RegisterTunnel error from server
type ServerRegisterTunnelError struct {
Cause error

View File

@ -5,6 +5,7 @@ type Event struct {
Index uint8
EventType Status
Location string
Protocol Protocol
URL string
}

View File

@ -12,6 +12,7 @@ import (
"golang.org/x/sync/errgroup"
"github.com/cloudflare/cloudflared/h2mux"
"github.com/cloudflare/cloudflared/tracing"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
"github.com/cloudflare/cloudflared/websocket"
)
@ -68,6 +69,7 @@ func NewH2muxConnection(
connIndex uint8,
observer *Observer,
gracefulShutdownC <-chan struct{},
log *zerolog.Logger,
) (*h2muxConnection, error, bool) {
h := &h2muxConnection{
orchestrator: orchestrator,
@ -78,6 +80,7 @@ func NewH2muxConnection(
observer: observer,
gracefulShutdownC: gracefulShutdownC,
newRPCClientFunc: newRegistrationRPCClient,
log: log,
}
// Establish a muxed connection with the edge
@ -233,7 +236,7 @@ func (h *h2muxConnection) ServeStream(stream *h2mux.MuxedStream) error {
return err
}
err = originProxy.ProxyHTTP(respWriter, req, sourceConnectionType == TypeWebsocket)
err = originProxy.ProxyHTTP(respWriter, tracing.NewTracedHTTPRequest(req, h.log), sourceConnectionType == TypeWebsocket)
if err != nil {
respWriter.WriteErrorResponse()
}
@ -256,6 +259,10 @@ type h2muxRespWriter struct {
*h2mux.MuxedStream
}
func (rp *h2muxRespWriter) AddTrailer(trailerName, trailerValue string) {
// do nothing. we don't support trailers over h2mux
}
func (rp *h2muxRespWriter) WriteRespHeaders(status int, header http.Header) error {
headers := H1ResponseToH2ResponseHeaders(status, header)
headers = append(headers, h2mux.Header{Name: ResponseMetaHeader, Value: responseMetaHeaderOrigin})

View File

@ -47,8 +47,8 @@ func newH2MuxConnection(t require.TestingT) (*h2muxConnection, *h2mux.Muxer) {
edgeMuxChan <- edgeMux
}()
var connIndex = uint8(0)
testObserver := NewObserver(&log, &log, false)
h2muxConn, err, _ := NewH2muxConnection(testOrchestrator, testGracePeriod, testMuxerConfig, originConn, connIndex, testObserver, nil)
testObserver := NewObserver(&log, &log)
h2muxConn, err, _ := NewH2muxConnection(testOrchestrator, testGracePeriod, testMuxerConfig, originConn, connIndex, testObserver, nil, &log)
require.NoError(t, err)
return h2muxConn, <-edgeMuxChan
}

View File

@ -2,6 +2,7 @@ package connection
import (
"context"
gojson "encoding/json"
"fmt"
"io"
"net"
@ -14,6 +15,7 @@ import (
"github.com/rs/zerolog"
"golang.org/x/net/http2"
"github.com/cloudflare/cloudflared/tracing"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
)
@ -23,6 +25,7 @@ const (
InternalTCPProxySrcHeader = "Cf-Cloudflared-Proxy-Src"
WebsocketUpgrade = "websocket"
ControlStreamUpgrade = "control-stream"
ConfigurationUpdate = "update-configuration"
)
var errEdgeConnectionClosed = fmt.Errorf("connection with edge closed")
@ -100,7 +103,7 @@ func (c *HTTP2Connection) ServeHTTP(w http.ResponseWriter, r *http.Request) {
connType := determineHTTP2Type(r)
handleMissingRequestParts(connType, r)
respWriter, err := NewHTTP2RespWriter(r, w, connType)
respWriter, err := NewHTTP2RespWriter(r, w, connType, c.log)
if err != nil {
c.observer.log.Error().Msg(err.Error())
return
@ -114,15 +117,23 @@ func (c *HTTP2Connection) ServeHTTP(w http.ResponseWriter, r *http.Request) {
switch connType {
case TypeControlStream:
if err := c.controlStreamHandler.ServeControlStream(r.Context(), respWriter, c.connOptions); err != nil {
if err := c.controlStreamHandler.ServeControlStream(r.Context(), respWriter, c.connOptions, c.orchestrator); err != nil {
c.controlStreamErr = err
c.log.Error().Err(err)
respWriter.WriteErrorResponse()
}
case TypeConfiguration:
if err := c.handleConfigurationUpdate(respWriter, r); err != nil {
c.log.Error().Err(err)
respWriter.WriteErrorResponse()
}
case TypeWebsocket, TypeHTTP:
stripWebsocketUpgradeHeader(r)
if err := originProxy.ProxyHTTP(respWriter, r, connType == TypeWebsocket); err != nil {
// Check for tracing on request
tr := tracing.NewTracedHTTPRequest(r, c.log)
if err := originProxy.ProxyHTTP(respWriter, tr, connType == TypeWebsocket); err != nil {
err := fmt.Errorf("Failed to proxy HTTP: %w", err)
c.log.Error().Err(err)
respWriter.WriteErrorResponse()
@ -138,9 +149,10 @@ func (c *HTTP2Connection) ServeHTTP(w http.ResponseWriter, r *http.Request) {
rws := NewHTTPResponseReadWriterAcker(respWriter, r)
if err := originProxy.ProxyTCP(r.Context(), rws, &TCPRequest{
Dest: host,
CFRay: FindCfRayHeader(r),
LBProbe: IsLBProbeRequest(r),
Dest: host,
CFRay: FindCfRayHeader(r),
LBProbe: IsLBProbeRequest(r),
CfTraceID: r.Header.Get(tracing.TracerContextName),
}); err != nil {
respWriter.WriteErrorResponse()
}
@ -152,6 +164,26 @@ func (c *HTTP2Connection) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
}
// ConfigurationUpdateBody is the representation used by the edge to send configuration updates to cloudflared.
type ConfigurationUpdateBody struct {
Version int32 `json:"version"`
Config gojson.RawMessage `json:"config"`
}
func (c *HTTP2Connection) handleConfigurationUpdate(respWriter *http2RespWriter, r *http.Request) error {
var configBody ConfigurationUpdateBody
if err := json.NewDecoder(r.Body).Decode(&configBody); err != nil {
return err
}
resp := c.orchestrator.UpdateConfig(configBody.Version, configBody.Config)
bdy, err := json.Marshal(resp)
if err != nil {
return err
}
_, err = respWriter.Write(bdy)
return err
}
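
For reference, a small standalone sketch of the JSON shape this handler decodes. The local struct simply mirrors the tags of ConfigurationUpdateBody above; the ingress rule in the payload is an arbitrary example, not a required value.

package main

import (
    "encoding/json"
    "fmt"
)

// configurationUpdateBody mirrors the json tags of ConfigurationUpdateBody.
type configurationUpdateBody struct {
    Version int32           `json:"version"`
    Config  json.RawMessage `json:"config"`
}

func main() {
    body := configurationUpdateBody{
        Version: 2,
        Config:  json.RawMessage(`{"ingress":[{"service":"http_status:404"}]}`),
    }
    b, err := json.Marshal(body)
    if err != nil {
        panic(err)
    }
    // {"version":2,"config":{"ingress":[{"service":"http_status:404"}]}}
    fmt.Println(string(b))
}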
func (c *HTTP2Connection) close() {
// Wait for all serve HTTP handlers to return
c.activeRequestsWG.Wait()
@ -159,18 +191,21 @@ func (c *HTTP2Connection) close() {
}
type http2RespWriter struct {
r io.Reader
w http.ResponseWriter
flusher http.Flusher
shouldFlush bool
r io.Reader
w http.ResponseWriter
flusher http.Flusher
shouldFlush bool
statusWritten bool
log *zerolog.Logger
}
func NewHTTP2RespWriter(r *http.Request, w http.ResponseWriter, connType Type) (*http2RespWriter, error) {
func NewHTTP2RespWriter(r *http.Request, w http.ResponseWriter, connType Type, log *zerolog.Logger) (*http2RespWriter, error) {
flusher, isFlusher := w.(http.Flusher)
if !isFlusher {
respWriter := &http2RespWriter{
r: r.Body,
w: w,
r: r.Body,
w: w,
log: log,
}
respWriter.WriteErrorResponse()
return nil, fmt.Errorf("%T doesn't implement http.Flusher", w)
@ -181,14 +216,24 @@ func NewHTTP2RespWriter(r *http.Request, w http.ResponseWriter, connType Type) (
w: w,
flusher: flusher,
shouldFlush: connType.shouldFlush(),
log: log,
}, nil
}
func (rp *http2RespWriter) AddTrailer(trailerName, trailerValue string) {
if !rp.statusWritten {
rp.log.Warn().Msg("Tried to add Trailer to response before status written. Ignoring...")
return
}
rp.w.Header().Add(http2.TrailerPrefix+trailerName, trailerValue)
}
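
AddTrailer relies on the standard trailer mechanism shared by net/http and x/net/http2: header names prefixed with TrailerPrefix after the status has been written are emitted as trailers. A minimal sketch using only net/http, with a hypothetical X-Origin-Checksum trailer:

package main

import (
    "fmt"
    "log"
    "net/http"
)

func handler(w http.ResponseWriter, r *http.Request) {
    w.WriteHeader(http.StatusOK)
    fmt.Fprintln(w, "body")
    // Headers added with the TrailerPrefix after WriteHeader has been called
    // are sent as trailers (over HTTP/2, or HTTP/1.1 chunked responses).
    w.Header().Set(http.TrailerPrefix+"X-Origin-Checksum", "abc123")
}

func main() {
    log.Fatal(http.ListenAndServe("127.0.0.1:8080", http.HandlerFunc(handler)))
}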
func (rp *http2RespWriter) WriteRespHeaders(status int, header http.Header) error {
dest := rp.w.Header()
userHeaders := make(http.Header, len(header))
for name, values := range header {
// Since these are http2 headers, they're required to be lowercase
// lowercase the header name to simplify the checks below
h2name := strings.ToLower(name)
if h2name == "content-length" {
@ -197,6 +242,12 @@ func (rp *http2RespWriter) WriteRespHeaders(status int, header http.Header) erro
dest[name] = values
}
if h2name == tracing.IntCloudflaredTracingHeader {
// Add cf-int-cloudflared-tracing header outside of serialized userHeaders
dest[tracing.CanonicalCloudflaredTracingHeader] = values
continue
}
if !IsControlResponseHeader(h2name) || IsWebsocketClientHeader(h2name) {
// User headers, on the other hand, must all be serialized so that
// HTTP/2 header validation won't be applied to HTTP/1 header values
@ -206,18 +257,21 @@ func (rp *http2RespWriter) WriteRespHeaders(status int, header http.Header) erro
// Perform user header serialization and set them in the single header
dest.Set(CanonicalResponseUserHeaders, SerializeHeaders(userHeaders))
rp.setResponseMetaHeader(responseMetaHeaderOrigin)
// HTTP2 removes support for 101 Switching Protocols https://tools.ietf.org/html/rfc7540#section-8.1.1
if status == http.StatusSwitchingProtocols {
status = http.StatusOK
}
rp.w.WriteHeader(status)
if IsServerSentEvent(header) {
if shouldFlush(header) {
rp.shouldFlush = true
}
if rp.shouldFlush {
rp.flusher.Flush()
}
rp.statusWritten = true
return nil
}
@ -239,7 +293,7 @@ func (rp *http2RespWriter) Write(p []byte) (n int, err error) {
// Implementer of OriginClient should make sure it doesn't write to the connection after Proxy returns
// Register a recover routine just in case.
if r := recover(); r != nil {
println(fmt.Sprintf("Recover from http2 response writer panic, error %s", debug.Stack()))
rp.log.Debug().Msgf("Recover from http2 response writer panic, error %s", debug.Stack())
}
}()
n, err = rp.w.Write(p)
@ -255,6 +309,8 @@ func (rp *http2RespWriter) Close() error {
func determineHTTP2Type(r *http.Request) Type {
switch {
case isConfigurationUpdate(r):
return TypeConfiguration
case isWebsocketUpgrade(r):
return TypeWebsocket
case IsTCPStream(r):
@ -288,6 +344,10 @@ func isWebsocketUpgrade(r *http.Request) bool {
return r.Header.Get(InternalUpgradeHeader) == WebsocketUpgrade
}
func isConfigurationUpdate(r *http.Request) bool {
return r.Header.Get(InternalUpgradeHeader) == ConfigurationUpdate
}
// IsTCPStream discerns if the connection request needs a tcp stream proxy.
func IsTCPStream(r *http.Request) bool {
return r.Header.Get(InternalTCPProxySrcHeader) != ""

View File

@ -1,6 +1,7 @@
package connection
import (
"bytes"
"context"
"errors"
"fmt"
@ -14,6 +15,7 @@ import (
"time"
"github.com/gobwas/ws/wsutil"
"github.com/google/uuid"
"github.com/rs/zerolog"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@ -31,7 +33,7 @@ func newTestHTTP2Connection() (*HTTP2Connection, net.Conn) {
edgeConn, cfdConn := net.Pipe()
var connIndex = uint8(0)
log := zerolog.Nop()
obs := NewObserver(&log, &log, false)
obs := NewObserver(&log, &log)
controlStream := NewControlStream(
obs,
mockConnectedFuse{},
@ -39,7 +41,9 @@ func newTestHTTP2Connection() (*HTTP2Connection, net.Conn) {
connIndex,
nil,
nil,
nil,
1*time.Second,
HTTP2,
)
return NewHTTP2Connection(
cfdConn,
@ -53,6 +57,41 @@ func newTestHTTP2Connection() (*HTTP2Connection, net.Conn) {
), edgeConn
}
func TestHTTP2ConfigurationSet(t *testing.T) {
http2Conn, edgeConn := newTestHTTP2Connection()
ctx, cancel := context.WithCancel(context.Background())
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
http2Conn.Serve(ctx)
}()
edgeHTTP2Conn, err := testTransport.NewClientConn(edgeConn)
require.NoError(t, err)
endpoint := fmt.Sprintf("http://localhost:8080/ok")
reqBody := []byte(`{
"version": 2,
"config": {"warp-routing": {"enabled": true}, "originRequest" : {"connectTimeout": 10}, "ingress" : [ {"hostname": "test", "service": "https://localhost:8000" } , {"service": "http_status:404"} ]}}
`)
reader := bytes.NewReader(reqBody)
req, err := http.NewRequestWithContext(ctx, http.MethodPut, endpoint, reader)
require.NoError(t, err)
req.Header.Set(InternalUpgradeHeader, ConfigurationUpdate)
resp, err := edgeHTTP2Conn.RoundTrip(req)
require.NoError(t, err)
require.Equal(t, http.StatusOK, resp.StatusCode)
bdy, err := ioutil.ReadAll(resp.Body)
require.NoError(t, err)
assert.Equal(t, `{"lastAppliedVersion":2,"err":null}`, string(bdy))
cancel()
wg.Wait()
}
func TestServeHTTP(t *testing.T) {
tests := []testRequest{
{
@ -130,18 +169,27 @@ type mockNamedTunnelRPCClient struct {
unregistered chan struct{}
}
func (mc mockNamedTunnelRPCClient) SendLocalConfiguration(c context.Context, config []byte, observer *Observer) error {
return nil
}
func (mc mockNamedTunnelRPCClient) RegisterConnection(
c context.Context,
properties *NamedTunnelProperties,
options *tunnelpogs.ConnectionOptions,
connIndex uint8,
edgeAddress net.IP,
observer *Observer,
) error {
) (*tunnelpogs.ConnectionDetails, error) {
if mc.shouldFail != nil {
return mc.shouldFail
return nil, mc.shouldFail
}
close(mc.registered)
return nil
return &tunnelpogs.ConnectionDetails{
Location: "LIS",
UUID: uuid.New(),
TunnelIsRemotelyManaged: false,
}, nil
}
func (mc mockNamedTunnelRPCClient) GracefulShutdown(ctx context.Context, gracePeriod time.Duration) {
@ -309,15 +357,17 @@ func TestServeControlStream(t *testing.T) {
unregistered: make(chan struct{}),
}
obs := NewObserver(&log, &log, false)
obs := NewObserver(&log, &log)
controlStream := NewControlStream(
obs,
mockConnectedFuse{},
&NamedTunnelProperties{},
1,
nil,
rpcClientFactory.newMockRPCClient,
nil,
1*time.Second,
HTTP2,
)
http2Conn.controlStreamHandler = controlStream
@ -359,15 +409,17 @@ func TestFailRegistration(t *testing.T) {
unregistered: make(chan struct{}),
}
obs := NewObserver(&log, &log, false)
obs := NewObserver(&log, &log)
controlStream := NewControlStream(
obs,
mockConnectedFuse{},
&NamedTunnelProperties{},
http2Conn.connIndex,
nil,
rpcClientFactory.newMockRPCClient,
nil,
1*time.Second,
HTTP2,
)
http2Conn.controlStreamHandler = controlStream
@ -404,16 +456,18 @@ func TestGracefulShutdownHTTP2(t *testing.T) {
events := &eventCollectorSink{}
shutdownC := make(chan struct{})
obs := NewObserver(&log, &log, false)
obs := NewObserver(&log, &log)
obs.RegisterSink(events)
controlStream := NewControlStream(
obs,
mockConnectedFuse{},
&NamedTunnelProperties{},
http2Conn.connIndex,
nil,
rpcClientFactory.newMockRPCClient,
shutdownC,
1*time.Second,
HTTP2,
)
http2Conn.controlStreamHandler = controlStream
@ -441,7 +495,7 @@ func TestGracefulShutdownHTTP2(t *testing.T) {
select {
case <-rpcClientFactory.registered:
break //ok
break // ok
case <-time.Tick(time.Second):
t.Fatal("timeout out waiting for registration")
}
@ -451,7 +505,7 @@ func TestGracefulShutdownHTTP2(t *testing.T) {
select {
case <-rpcClientFactory.unregistered:
break //ok
break // ok
case <-time.Tick(time.Second):
t.Fatal("timeout out waiting for unregistered signal")
}

View File

@ -13,6 +13,7 @@ const (
MetricsNamespace = "cloudflared"
TunnelSubsystem = "tunnel"
muxerSubsystem = "muxer"
configSubsystem = "config"
)
type muxerMetrics struct {
@ -36,6 +37,11 @@ type muxerMetrics struct {
compRateAve *prometheus.GaugeVec
}
type localConfigMetrics struct {
pushes prometheus.Counter
pushesErrors prometheus.Counter
}
type tunnelMetrics struct {
timerRetries prometheus.Gauge
serverLocations *prometheus.GaugeVec
@ -51,6 +57,39 @@ type tunnelMetrics struct {
muxerMetrics *muxerMetrics
tunnelsHA tunnelsForHA
userHostnamesCounts *prometheus.CounterVec
localConfigMetrics *localConfigMetrics
}
func newLocalConfigMetrics() *localConfigMetrics {
pushesMetric := prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: MetricsNamespace,
Subsystem: configSubsystem,
Name: "local_config_pushes",
Help: "Number of local configuration pushes to the edge",
},
)
pushesErrorsMetric := prometheus.NewCounter(
prometheus.CounterOpts{
Namespace: MetricsNamespace,
Subsystem: configSubsystem,
Name: "local_config_pushes_errors",
Help: "Number of errors occurred during local configuration pushes",
},
)
prometheus.MustRegister(
pushesMetric,
pushesErrorsMetric,
)
return &localConfigMetrics{
pushes: pushesMetric,
pushesErrors: pushesErrorsMetric,
}
}
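
These two counters are incremented by SendLocalConfiguration further down in this change: every push is counted, and failures are additionally counted via a deferred check. A standalone sketch of that pattern with hypothetical variable and function names, using the same client_golang calls:

package main

import (
    "errors"

    "github.com/prometheus/client_golang/prometheus"
)

var (
    pushes = prometheus.NewCounter(prometheus.CounterOpts{
        Namespace: "cloudflared",
        Subsystem: "config",
        Name:      "local_config_pushes",
        Help:      "Number of local configuration pushes to the edge",
    })
    pushErrors = prometheus.NewCounter(prometheus.CounterOpts{
        Namespace: "cloudflared",
        Subsystem: "config",
        Name:      "local_config_pushes_errors",
        Help:      "Number of errors occurred during local configuration pushes",
    })
)

func init() {
    prometheus.MustRegister(pushes, pushErrors)
}

// pushLocalConfig counts every attempt and, through the deferred check on the
// named error return, every failure.
func pushLocalConfig(send func() error) (err error) {
    pushes.Inc()
    defer func() {
        if err != nil {
            pushErrors.Inc()
        }
    }()
    return send()
}

func main() {
    _ = pushLocalConfig(func() error { return nil })
    _ = pushLocalConfig(func() error { return errors.New("push failed") })
}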
func newMuxerMetrics() *muxerMetrics {
@ -328,7 +367,7 @@ func initTunnelMetrics() *tunnelMetrics {
Name: "server_locations",
Help: "Where each tunnel is connected to. 1 means current location, 0 means previous locations.",
},
[]string{"connection_id", "location"},
[]string{"connection_id", "edge_location"},
)
prometheus.MustRegister(serverLocations)
@ -386,6 +425,7 @@ func initTunnelMetrics() *tunnelMetrics {
regFail: registerFail,
rpcFail: rpcFail,
userHostnamesCounts: userHostnamesCounts,
localConfigMetrics: newLocalConfigMetrics(),
}
}

View File

@ -1,6 +1,7 @@
package connection
import (
"net"
"strings"
"github.com/rs/zerolog"
@ -8,6 +9,7 @@ import (
const (
LogFieldLocation = "location"
LogFieldIPAddress = "ip"
observerChannelBufferSize = 16
)
@ -16,7 +18,6 @@ type Observer struct {
logTransport *zerolog.Logger
metrics *tunnelMetrics
tunnelEventChan chan Event
uiEnabled bool
addSinkChan chan EventSink
}
@ -24,12 +25,11 @@ type EventSink interface {
OnTunnelEvent(event Event)
}
func NewObserver(log, logTransport *zerolog.Logger, uiEnabled bool) *Observer {
func NewObserver(log, logTransport *zerolog.Logger) *Observer {
o := &Observer{
log: log,
logTransport: logTransport,
metrics: newTunnelMetrics(),
uiEnabled: uiEnabled,
tunnelEventChan: make(chan Event, observerChannelBufferSize),
addSinkChan: make(chan EventSink, observerChannelBufferSize),
}
@ -41,11 +41,12 @@ func (o *Observer) RegisterSink(sink EventSink) {
o.addSinkChan <- sink
}
func (o *Observer) logServerInfo(connIndex uint8, location, msg string) {
func (o *Observer) logServerInfo(connIndex uint8, location string, address net.IP, msg string) {
o.sendEvent(Event{Index: connIndex, EventType: Connected, Location: location})
o.log.Info().
Uint8(LogFieldConnIndex, connIndex).
Str(LogFieldLocation, location).
IPAddr(LogFieldIPAddress, address).
Msg(msg)
o.metrics.registerServerLocation(uint8ToString(connIndex), location)
}
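
The new IPAddr field logs the edge address as structured data. A tiny zerolog sketch (all values made up) showing roughly what the log call above produces:

package main

import (
    "net"
    "os"

    "github.com/rs/zerolog"
)

func main() {
    log := zerolog.New(os.Stdout)
    // Prints roughly:
    // {"level":"info","connIndex":0,"location":"LIS","ip":"198.51.100.1","message":"Connection registered"}
    log.Info().
        Uint8("connIndex", 0).
        Str("location", "LIS").
        IPAddr("ip", net.ParseIP("198.51.100.1")).
        Msg("Connection registered")
}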
@ -54,8 +55,8 @@ func (o *Observer) sendRegisteringEvent(connIndex uint8) {
o.sendEvent(Event{Index: connIndex, EventType: RegisteringTunnel})
}
func (o *Observer) sendConnectedEvent(connIndex uint8, location string) {
o.sendEvent(Event{Index: connIndex, EventType: Connected, Location: location})
func (o *Observer) sendConnectedEvent(connIndex uint8, protocol Protocol, location string) {
o.sendEvent(Event{Index: connIndex, EventType: Connected, Protocol: protocol, Location: location})
}
func (o *Observer) SendURL(url string) {

View File

@ -12,7 +12,7 @@ import (
)
func TestSendUrl(t *testing.T) {
observer := NewObserver(&log, &log, false)
observer := NewObserver(&log, &log)
observer.SendURL("my-url.com")
assert.Equal(t, 1.0, getCounterValue(t, observer.metrics.userHostnamesCounts, "https://my-url.com"))
@ -63,7 +63,7 @@ func TestRegisterServerLocation(t *testing.T) {
}
func TestObserverEventsDontBlock(t *testing.T) {
observer := NewObserver(&log, &log, false)
observer := NewObserver(&log, &log)
var mu sync.Mutex
observer.RegisterSink(EventSinkFunc(func(_ Event) {
// callback will block if lock is already held

View File

@ -1,6 +1,7 @@
package connection
import (
"errors"
"fmt"
"hash/fnv"
"sync"
@ -19,7 +20,7 @@ const (
edgeH2TLSServerName = "h2.cftunnel.com"
// edgeQUICServerName is the server name to establish quic connection with edge.
edgeQUICServerName = "quic.cftunnel.com"
autoSelectFlag = "auto"
AutoSelectFlag = "auto"
)
var (
@ -130,6 +131,7 @@ type autoProtocolSelector struct {
refreshAfter time.Time
ttl time.Duration
log *zerolog.Logger
needPQ bool
}
func newAutoProtocolSelector(
@ -139,6 +141,7 @@ func newAutoProtocolSelector(
fetchFunc PercentageFetcher,
ttl time.Duration,
log *zerolog.Logger,
needPQ bool,
) *autoProtocolSelector {
return &autoProtocolSelector{
current: current,
@ -148,6 +151,7 @@ func newAutoProtocolSelector(
refreshAfter: time.Now().Add(ttl),
ttl: ttl,
log: log,
needPQ: needPQ,
}
}
@ -187,6 +191,9 @@ func getProtocol(protocolPool []Protocol, fetchFunc PercentageFetcher, switchThr
func (s *autoProtocolSelector) Fallback() (Protocol, bool) {
s.lock.RLock()
defer s.lock.RUnlock()
if s.needPQ {
return 0, false
}
return s.current.fallback()
}
@ -199,9 +206,14 @@ func NewProtocolSelector(
fetchFunc PercentageFetcher,
ttl time.Duration,
log *zerolog.Logger,
needPQ bool,
) (ProtocolSelector, error) {
// Classic tunnel is only supported with h2mux
if namedTunnel == nil {
if needPQ {
return nil, errors.New("Classic tunnel does not support post-quantum")
}
return &staticProtocolSelector{
current: H2mux,
}, nil
@ -209,8 +221,11 @@ func NewProtocolSelector(
threshold := switchThreshold(namedTunnel.Credentials.AccountTag)
fetchedProtocol, err := getProtocol([]Protocol{QUIC, HTTP2}, fetchFunc, threshold)
if err != nil {
log.Err(err).Msg("Unable to lookup protocol. Defaulting to `http2`. If this fails, you can set `--protocol h2mux` in your cloudflared command.")
if err != nil && protocolFlag == "auto" {
log.Err(err).Msg("Unable to lookup protocol. Defaulting to `http2`. If this fails, you can attempt `--protocol quic` instead.")
if needPQ {
return nil, errors.New("http2 does not support post-quantum")
}
return &staticProtocolSelector{
current: HTTP2,
}, nil
@ -221,10 +236,10 @@ func NewProtocolSelector(
protocolFlag = HTTP2.String()
fetchedProtocol = HTTP2Warp
}
return selectWarpRoutingProtocols(protocolFlag, fetchFunc, ttl, log, threshold, fetchedProtocol)
return selectWarpRoutingProtocols(protocolFlag, fetchFunc, ttl, log, threshold, fetchedProtocol, needPQ)
}
return selectNamedTunnelProtocols(protocolFlag, fetchFunc, ttl, log, threshold, fetchedProtocol)
return selectNamedTunnelProtocols(protocolFlag, fetchFunc, ttl, log, threshold, fetchedProtocol, needPQ)
}
func selectNamedTunnelProtocols(
@ -234,6 +249,7 @@ func selectNamedTunnelProtocols(
log *zerolog.Logger,
threshold int32,
protocol Protocol,
needPQ bool,
) (ProtocolSelector, error) {
// If the user picks a protocol, then we stick to it no matter what.
switch protocolFlag {
@ -247,8 +263,8 @@ func selectNamedTunnelProtocols(
// If the user does not pick (hopefully the majority) then we use the one derived from the TXT DNS record and
// fallback on failures.
if protocolFlag == autoSelectFlag {
return newAutoProtocolSelector(protocol, []Protocol{QUIC, HTTP2, H2mux}, threshold, fetchFunc, ttl, log), nil
if protocolFlag == AutoSelectFlag {
return newAutoProtocolSelector(protocol, []Protocol{QUIC, HTTP2, H2mux}, threshold, fetchFunc, ttl, log, needPQ), nil
}
return nil, fmt.Errorf("Unknown protocol %s, %s", protocolFlag, AvailableProtocolFlagMessage)
@ -261,6 +277,7 @@ func selectWarpRoutingProtocols(
log *zerolog.Logger,
threshold int32,
protocol Protocol,
needPQ bool,
) (ProtocolSelector, error) {
// If the user picks a protocol, then we stick to it no matter what.
switch protocolFlag {
@ -272,8 +289,8 @@ func selectWarpRoutingProtocols(
// If the user does not pick (hopefully the majority) then we use the one derived from the TXT DNS record and
// fallback on failures.
if protocolFlag == autoSelectFlag {
return newAutoProtocolSelector(protocol, []Protocol{QUICWarp, HTTP2Warp}, threshold, fetchFunc, ttl, log), nil
if protocolFlag == AutoSelectFlag {
return newAutoProtocolSelector(protocol, []Protocol{QUICWarp, HTTP2Warp}, threshold, fetchFunc, ttl, log, needPQ), nil
}
return nil, fmt.Errorf("Unknown protocol %s, %s", protocolFlag, AvailableProtocolFlagMessage)

View File

@ -91,14 +91,14 @@ func TestNewProtocolSelector(t *testing.T) {
},
{
name: "named tunnel quic and http2 disabled",
protocol: "auto",
protocol: AutoSelectFlag,
expectedProtocol: H2mux,
fetchFunc: mockFetcher(false, edgediscovery.ProtocolPercent{Protocol: "http2", Percentage: -1}, edgediscovery.ProtocolPercent{Protocol: "quic", Percentage: -1}),
namedTunnelConfig: testNamedTunnelProperties,
},
{
name: "named tunnel quic disabled",
protocol: "auto",
protocol: AutoSelectFlag,
expectedProtocol: HTTP2,
// Hasfallback true is because if http2 fails, then we further fallback to h2mux.
hasFallback: true,
@ -108,21 +108,21 @@ func TestNewProtocolSelector(t *testing.T) {
},
{
name: "named tunnel auto all http2 disabled",
protocol: "auto",
protocol: AutoSelectFlag,
expectedProtocol: H2mux,
fetchFunc: mockFetcher(false, edgediscovery.ProtocolPercent{Protocol: "http2", Percentage: -1}),
namedTunnelConfig: testNamedTunnelProperties,
},
{
name: "named tunnel auto to h2mux",
protocol: "auto",
protocol: AutoSelectFlag,
expectedProtocol: H2mux,
fetchFunc: mockFetcher(false, edgediscovery.ProtocolPercent{Protocol: "http2", Percentage: 0}),
namedTunnelConfig: testNamedTunnelProperties,
},
{
name: "named tunnel auto to http2",
protocol: "auto",
protocol: AutoSelectFlag,
expectedProtocol: HTTP2,
hasFallback: true,
expectedFallback: H2mux,
@ -131,7 +131,7 @@ func TestNewProtocolSelector(t *testing.T) {
},
{
name: "named tunnel auto to quic",
protocol: "auto",
protocol: AutoSelectFlag,
expectedProtocol: QUIC,
hasFallback: true,
expectedFallback: HTTP2,
@ -167,7 +167,7 @@ func TestNewProtocolSelector(t *testing.T) {
},
{
name: "warp routing quic",
protocol: "auto",
protocol: AutoSelectFlag,
expectedProtocol: QUICWarp,
hasFallback: true,
expectedFallback: HTTP2Warp,
@ -177,7 +177,7 @@ func TestNewProtocolSelector(t *testing.T) {
},
{
name: "warp routing auto",
protocol: "auto",
protocol: AutoSelectFlag,
expectedProtocol: HTTP2Warp,
hasFallback: false,
fetchFunc: mockFetcher(false, edgediscovery.ProtocolPercent{Protocol: "http2", Percentage: 100}),
@ -186,7 +186,7 @@ func TestNewProtocolSelector(t *testing.T) {
},
{
name: "warp routing auto- quic",
protocol: "auto",
protocol: AutoSelectFlag,
expectedProtocol: QUICWarp,
hasFallback: true,
expectedFallback: HTTP2Warp,
@ -209,7 +209,7 @@ func TestNewProtocolSelector(t *testing.T) {
},
{
name: "named tunnel fetch error",
protocol: "auto",
protocol: AutoSelectFlag,
fetchFunc: mockFetcher(true),
namedTunnelConfig: testNamedTunnelProperties,
expectedProtocol: HTTP2,
@ -219,7 +219,7 @@ func TestNewProtocolSelector(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
selector, err := NewProtocolSelector(test.protocol, test.warpRoutingEnabled, test.namedTunnelConfig, test.fetchFunc, testNoTTL, &log)
selector, err := NewProtocolSelector(test.protocol, test.warpRoutingEnabled, test.namedTunnelConfig, test.fetchFunc, testNoTTL, &log, false)
if test.wantErr {
assert.Error(t, err, fmt.Sprintf("test %s failed", test.name))
} else {
@ -237,7 +237,7 @@ func TestNewProtocolSelector(t *testing.T) {
func TestAutoProtocolSelectorRefresh(t *testing.T) {
fetcher := dynamicMockFetcher{}
selector, err := NewProtocolSelector("auto", noWarpRoutingEnabled, testNamedTunnelProperties, fetcher.fetch(), testNoTTL, &log)
selector, err := NewProtocolSelector(AutoSelectFlag, noWarpRoutingEnabled, testNamedTunnelProperties, fetcher.fetch(), testNoTTL, &log, false)
assert.NoError(t, err)
assert.Equal(t, H2mux, selector.Current())
@ -267,7 +267,7 @@ func TestAutoProtocolSelectorRefresh(t *testing.T) {
func TestHTTP2ProtocolSelectorRefresh(t *testing.T) {
fetcher := dynamicMockFetcher{}
// Since the user chooses http2 on purpose, we always stick to it.
selector, err := NewProtocolSelector("http2", noWarpRoutingEnabled, testNamedTunnelProperties, fetcher.fetch(), testNoTTL, &log)
selector, err := NewProtocolSelector("http2", noWarpRoutingEnabled, testNamedTunnelProperties, fetcher.fetch(), testNoTTL, &log, false)
assert.NoError(t, err)
assert.Equal(t, HTTP2, selector.Current())
@ -297,7 +297,7 @@ func TestHTTP2ProtocolSelectorRefresh(t *testing.T) {
func TestProtocolSelectorRefreshTTL(t *testing.T) {
fetcher := dynamicMockFetcher{}
fetcher.protocolPercents = edgediscovery.ProtocolPercents{edgediscovery.ProtocolPercent{Protocol: "quic", Percentage: 100}}
selector, err := NewProtocolSelector("auto", noWarpRoutingEnabled, testNamedTunnelProperties, fetcher.fetch(), time.Hour, &log)
selector, err := NewProtocolSelector(AutoSelectFlag, noWarpRoutingEnabled, testNamedTunnelProperties, fetcher.fetch(), time.Hour, &log, false)
assert.NoError(t, err)
assert.Equal(t, QUIC, selector.Current())

View File

@ -7,19 +7,25 @@ import (
"io"
"net"
"net/http"
"net/netip"
"strconv"
"strings"
"sync/atomic"
"time"
"github.com/google/uuid"
"github.com/lucas-clemente/quic-go"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"golang.org/x/sync/errgroup"
"github.com/cloudflare/cloudflared/datagramsession"
"github.com/cloudflare/cloudflared/ingress"
"github.com/cloudflare/cloudflared/packet"
quicpogs "github.com/cloudflare/cloudflared/quic"
"github.com/cloudflare/cloudflared/tracing"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
)
@ -30,14 +36,22 @@ const (
HTTPMethodKey = "HttpMethod"
// HTTPHostKey is used to get or set http Method in QUIC ALPN if the underlying proxy connection type is HTTP.
HTTPHostKey = "HttpHost"
QUICMetadataFlowID = "FlowID"
// empirically this capacity has been working well
demuxChanCapacity = 16
)
// QUICConnection represents the type that facilitates Proxying via QUIC streams.
type QUICConnection struct {
session quic.Session
logger *zerolog.Logger
orchestrator Orchestrator
sessionManager datagramsession.Manager
session quic.Connection
logger *zerolog.Logger
orchestrator Orchestrator
// sessionManager tracks active sessions. It receives datagrams from quic connection via datagramMuxer
sessionManager datagramsession.Manager
// datagramMuxer mux/demux datagrams from quic connection
datagramMuxer *quicpogs.DatagramMuxerV2
packetRouter *packet.Router
controlStreamHandler ControlStreamHandler
connOptions *tunnelpogs.ConnectionOptions
}
@ -51,24 +65,25 @@ func NewQUICConnection(
connOptions *tunnelpogs.ConnectionOptions,
controlStreamHandler ControlStreamHandler,
logger *zerolog.Logger,
packetRouterConfig *packet.GlobalRouterConfig,
) (*QUICConnection, error) {
session, err := quic.DialAddr(edgeAddr.String(), tlsConfig, quicConfig)
if err != nil {
return nil, fmt.Errorf("failed to dial to edge: %w", err)
return nil, &EdgeQuicDialError{Cause: err}
}
datagramMuxer, err := quicpogs.NewDatagramMuxer(session)
if err != nil {
return nil, err
}
sessionManager := datagramsession.NewManager(datagramMuxer, logger)
sessionDemuxChan := make(chan *packet.Session, demuxChanCapacity)
datagramMuxer := quicpogs.NewDatagramMuxerV2(session, logger, sessionDemuxChan)
sessionManager := datagramsession.NewManager(logger, datagramMuxer.SendToSession, sessionDemuxChan)
packetRouter := packet.NewRouter(packetRouterConfig, datagramMuxer, &returnPipe{muxer: datagramMuxer}, logger, orchestrator.WarpRoutingEnabled)
return &QUICConnection{
session: session,
orchestrator: orchestrator,
logger: logger,
sessionManager: sessionManager,
datagramMuxer: datagramMuxer,
packetRouter: packetRouter,
controlStreamHandler: controlStreamHandler,
connOptions: connOptions,
}, nil
@ -104,13 +119,21 @@ func (q *QUICConnection) Serve(ctx context.Context) error {
defer cancel()
return q.sessionManager.Serve(ctx)
})
errGroup.Go(func() error {
defer cancel()
return q.datagramMuxer.ServeReceive(ctx)
})
errGroup.Go(func() error {
defer cancel()
return q.packetRouter.Serve(ctx)
})
return errGroup.Wait()
}
func (q *QUICConnection) serveControlStream(ctx context.Context, controlStream quic.Stream) error {
// This blocks until the control plane is done.
err := q.controlStreamHandler.ServeControlStream(ctx, controlStream, q.connOptions)
err := q.controlStreamHandler.ServeControlStream(ctx, controlStream, q.connOptions, q.orchestrator)
if err != nil {
// Not wrapping error here to be consistent with the http2 message.
return err
@ -119,6 +142,11 @@ func (q *QUICConnection) serveControlStream(ctx context.Context, controlStream q
return nil
}
// Close closes the session with no errors specified.
func (q *QUICConnection) Close() {
q.session.CloseWithError(0, "")
}
func (q *QUICConnection) acceptStream(ctx context.Context) error {
defer q.Close()
for {
@ -130,23 +158,26 @@ func (q *QUICConnection) acceptStream(ctx context.Context) error {
}
return fmt.Errorf("failed to accept QUIC stream: %w", err)
}
go func() {
stream := quicpogs.NewSafeStreamCloser(quicStream)
defer stream.Close()
if err = q.handleStream(stream); err != nil {
q.logger.Err(err).Msg("Failed to handle QUIC stream")
}
}()
go q.runStream(quicStream)
}
}
// Close closes the session with no errors specified.
func (q *QUICConnection) Close() {
q.session.CloseWithError(0, "")
func (q *QUICConnection) runStream(quicStream quic.Stream) {
ctx := quicStream.Context()
stream := quicpogs.NewSafeStreamCloser(quicStream)
defer stream.Close()
// we are going to fuse readers/writers from stream <- cloudflared -> origin, and we want to guarantee that
// code executed in the code path of handleStream doesn't trigger an earlier close to the downstream write stream.
// So, we wrap the stream with a no-op write closer and only this method can actually close the write side of the stream.
// A call to close will simulate a close to the read-side, which will fail subsequent reads.
noCloseStream := &nopCloserReadWriter{ReadWriteCloser: stream}
if err := q.handleStream(ctx, noCloseStream); err != nil {
q.logger.Err(err).Msg("Failed to handle QUIC stream")
}
}
func (q *QUICConnection) handleStream(stream io.ReadWriteCloser) error {
func (q *QUICConnection) handleStream(ctx context.Context, stream io.ReadWriteCloser) error {
signature, err := quicpogs.DetermineProtocol(stream)
if err != nil {
return err
@ -155,9 +186,9 @@ func (q *QUICConnection) handleStream(stream io.ReadWriteCloser) error {
case quicpogs.DataStreamProtocolSignature:
reqServerStream, err := quicpogs.NewRequestServerStream(stream, signature)
if err != nil {
return nil
return err
}
return q.handleDataStream(reqServerStream)
return q.handleDataStream(ctx, reqServerStream)
case quicpogs.RPCStreamProtocolSignature:
rpcStream, err := quicpogs.NewRPCServerStream(stream, signature)
if err != nil {
@ -169,28 +200,43 @@ func (q *QUICConnection) handleStream(stream io.ReadWriteCloser) error {
}
}
func (q *QUICConnection) handleDataStream(stream *quicpogs.RequestServerStream) error {
connectRequest, err := stream.ReadConnectRequestData()
func (q *QUICConnection) handleDataStream(ctx context.Context, stream *quicpogs.RequestServerStream) error {
request, err := stream.ReadConnectRequestData()
if err != nil {
return err
}
if err := q.dispatchRequest(ctx, stream, err, request); err != nil {
_ = stream.WriteConnectResponseData(err)
q.logger.Err(err).Str("type", request.Type.String()).Str("dest", request.Dest).Msg("Request failed")
}
return nil
}
func (q *QUICConnection) dispatchRequest(ctx context.Context, stream *quicpogs.RequestServerStream, err error, request *quicpogs.ConnectRequest) error {
originProxy, err := q.orchestrator.GetOriginProxy()
if err != nil {
return err
}
switch connectRequest.Type {
switch request.Type {
case quicpogs.ConnectionTypeHTTP, quicpogs.ConnectionTypeWebsocket:
req, err := buildHTTPRequest(connectRequest, stream)
tracedReq, err := buildHTTPRequest(ctx, request, stream, q.logger)
if err != nil {
return err
}
w := newHTTPResponseAdapter(stream)
return originProxy.ProxyHTTP(w, req, connectRequest.Type == quicpogs.ConnectionTypeWebsocket)
return originProxy.ProxyHTTP(w, tracedReq, request.Type == quicpogs.ConnectionTypeWebsocket)
case quicpogs.ConnectionTypeTCP:
rwa := &streamReadWriteAcker{stream}
return originProxy.ProxyTCP(context.Background(), rwa, &TCPRequest{Dest: connectRequest.Dest})
metadata := request.MetadataMap()
return originProxy.ProxyTCP(ctx, rwa, &TCPRequest{
Dest: request.Dest,
FlowID: metadata[QUICMetadataFlowID],
CfTraceID: metadata[tracing.TracerContextName],
})
}
return nil
}
@ -200,24 +246,42 @@ func (q *QUICConnection) handleRPCStream(rpcStream *quicpogs.RPCServerStream) er
}
// RegisterUdpSession is the RPC method invoked by edge to register and run a session
func (q *QUICConnection) RegisterUdpSession(ctx context.Context, sessionID uuid.UUID, dstIP net.IP, dstPort uint16, closeAfterIdleHint time.Duration) error {
func (q *QUICConnection) RegisterUdpSession(ctx context.Context, sessionID uuid.UUID, dstIP net.IP, dstPort uint16, closeAfterIdleHint time.Duration, traceContext string) (*tunnelpogs.RegisterUdpSessionResponse, error) {
traceCtx := tracing.NewTracedContext(ctx, traceContext, q.logger)
ctx, registerSpan := traceCtx.Tracer().Start(traceCtx, "register-session", trace.WithAttributes(
attribute.String("session-id", sessionID.String()),
attribute.String("dst", fmt.Sprintf("%s:%d", dstIP, dstPort)),
))
// Each session is a series of datagrams from an eyeball to a dstIP:dstPort.
// (src port, dst IP, dst port) uniquely identifies a session, so it needs a dedicated connected socket.
originProxy, err := ingress.DialUDP(dstIP, dstPort)
if err != nil {
q.logger.Err(err).Msgf("Failed to create udp proxy to %s:%d", dstIP, dstPort)
return err
tracing.EndWithErrorStatus(registerSpan, err)
return nil, err
}
registerSpan.SetAttributes(
attribute.Bool("socket-bind-success", true),
attribute.String("src", originProxy.LocalAddr().String()),
)
session, err := q.sessionManager.RegisterSession(ctx, sessionID, originProxy)
if err != nil {
q.logger.Err(err).Str("sessionID", sessionID.String()).Msgf("Failed to register udp session")
return err
tracing.EndWithErrorStatus(registerSpan, err)
return nil, err
}
go q.serveUDPSession(session, closeAfterIdleHint)
q.logger.Debug().Msgf("Registered session %v, %v, %v", sessionID, dstIP, dstPort)
return nil
q.logger.Debug().Str("sessionID", sessionID.String()).Str("src", originProxy.LocalAddr().String()).Str("dst", fmt.Sprintf("%s:%d", dstIP, dstPort)).Msgf("Registered session")
tracing.End(registerSpan)
resp := tunnelpogs.RegisterUdpSessionResponse{
Spans: traceCtx.GetProtoSpans(),
}
return &resp, nil
}
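
The tracing helpers used here (tracing.NewTracedContext, tracing.EndWithErrorStatus, tracing.End) are cloudflared-internal; the OpenTelemetry pattern they wrap looks roughly like the standalone sketch below, where the tracer name, span name, and attribute values are illustrative only.

package main

import (
    "context"
    "fmt"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/attribute"
    "go.opentelemetry.io/otel/codes"
    "go.opentelemetry.io/otel/trace"
)

// registerSession sketches the start-span / end-with-error-status flow.
func registerSession(ctx context.Context, sessionID, dst string) (err error) {
    tracer := otel.Tracer("udp-session")
    ctx, span := tracer.Start(ctx, "register-session", trace.WithAttributes(
        attribute.String("session-id", sessionID),
        attribute.String("dst", dst),
    ))
    defer func() {
        if err != nil {
            span.SetStatus(codes.Error, err.Error())
        }
        span.End()
    }()
    _ = ctx // dial the origin socket, register the session, etc.
    return nil
}

func main() {
    fmt.Println(registerSession(context.Background(), "4c5bb34f-0000-0000-0000-000000000000", "192.0.2.10:53"))
}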
func (q *QUICConnection) serveUDPSession(session *datagramsession.Session, closeAfterIdleHint time.Duration) {
@ -276,8 +340,12 @@ type streamReadWriteAcker struct {
}
// AckConnection acks response back to the proxy.
func (s *streamReadWriteAcker) AckConnection() error {
return s.WriteConnectResponseData(nil)
func (s *streamReadWriteAcker) AckConnection(tracePropagation string) error {
metadata := quicpogs.Metadata{
Key: tracing.CanonicalCloudflaredTracingHeader,
Val: tracePropagation,
}
return s.WriteConnectResponseData(nil, metadata)
}
// httpResponseAdapter translates responses written by the HTTP Proxy into ones that can be used in QUIC.
@ -289,6 +357,10 @@ func newHTTPResponseAdapter(s *quicpogs.RequestServerStream) httpResponseAdapter
return httpResponseAdapter{s}
}
func (hrw httpResponseAdapter) AddTrailer(trailerName, trailerValue string) {
// we do not support trailers over QUIC
}
func (hrw httpResponseAdapter) WriteRespHeaders(status int, header http.Header) error {
metadata := make([]quicpogs.Metadata, 0)
metadata = append(metadata, quicpogs.Metadata{Key: "HttpStatus", Val: strconv.Itoa(status)})
@ -305,14 +377,19 @@ func (hrw httpResponseAdapter) WriteErrorResponse(err error) {
hrw.WriteConnectResponseData(err, quicpogs.Metadata{Key: "HttpStatus", Val: strconv.Itoa(http.StatusBadGateway)})
}
func buildHTTPRequest(connectRequest *quicpogs.ConnectRequest, body io.ReadCloser) (*http.Request, error) {
func buildHTTPRequest(
ctx context.Context,
connectRequest *quicpogs.ConnectRequest,
body io.ReadCloser,
log *zerolog.Logger,
) (*tracing.TracedHTTPRequest, error) {
metadata := connectRequest.MetadataMap()
dest := connectRequest.Dest
method := metadata[HTTPMethodKey]
host := metadata[HTTPHostKey]
isWebsocket := connectRequest.Type == quicpogs.ConnectionTypeWebsocket
req, err := http.NewRequest(method, dest, body)
req, err := http.NewRequestWithContext(ctx, method, dest, body)
if err != nil {
return nil, err
}
@ -342,10 +419,13 @@ func buildHTTPRequest(connectRequest *quicpogs.ConnectRequest, body io.ReadClose
// * there is no transfer-encoding=chunked already set.
// So, if transfer cannot be chunked and content length is 0, we don't set a request body.
if !isWebsocket && !isTransferEncodingChunked(req) && req.ContentLength == 0 {
req.Body = nil
req.Body = http.NoBody
}
stripWebsocketUpgradeHeader(req)
return req, err
// Check for tracing on request
tracedReq := tracing.NewTracedHTTPRequest(req, log)
return tracedReq, err
}
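
Note on the http.NoBody change above: unlike a nil Body, http.NoBody is a non-nil ReadCloser that immediately reports EOF, so downstream code can read the body unconditionally while the client still sends no payload. A tiny standalone check (the URL is a placeholder):

package main

import (
    "fmt"
    "io"
    "net/http"
)

func main() {
    req, err := http.NewRequest(http.MethodGet, "https://origin.internal/example", http.NoBody)
    if err != nil {
        panic(err)
    }
    n, _ := io.Copy(io.Discard, req.Body) // safe: Body is non-nil and yields 0 bytes
    fmt.Println(n, req.ContentLength)     // 0 0
}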
func setContentLength(req *http.Request) error {
@ -362,3 +442,53 @@ func isTransferEncodingChunked(req *http.Request) bool {
// separated value as well.
return strings.Contains(strings.ToLower(transferEncodingVal), "chunked")
}
// A helper struct that guarantees a call to close only affects the read side, not the write side.
type nopCloserReadWriter struct {
io.ReadWriteCloser
// for use by Read only
// we don't need a memory barrier here because there is an implicit assumption that
// Read calls can't happen concurrently by different go-routines.
sawEOF bool
// should be updated and read using atomic primitives.
// value is read in Read method and written in Close method, which could be done by different
// go-routines.
closed uint32
}
func (np *nopCloserReadWriter) Read(p []byte) (n int, err error) {
if np.sawEOF {
return 0, io.EOF
}
if atomic.LoadUint32(&np.closed) > 0 {
return 0, fmt.Errorf("closed by handler")
}
n, err = np.ReadWriteCloser.Read(p)
if err == io.EOF {
np.sawEOF = true
}
return
}
func (np *nopCloserReadWriter) Close() error {
atomic.StoreUint32(&np.closed, 1)
return nil
}
// returnPipe wraps DatagramMuxerV2 to satisfy the packet.FunnelUniPipe interface
type returnPipe struct {
muxer *quicpogs.DatagramMuxerV2
}
func (rp *returnPipe) SendPacket(dst netip.Addr, pk packet.RawPacket) error {
return rp.muxer.SendPacket(pk)
}
func (rp *returnPipe) Close() error {
return nil
}

View File

@ -10,6 +10,7 @@ import (
"net/http"
"net/url"
"os"
"strings"
"sync"
"testing"
"time"
@ -24,17 +25,22 @@ import (
"github.com/cloudflare/cloudflared/datagramsession"
quicpogs "github.com/cloudflare/cloudflared/quic"
"github.com/cloudflare/cloudflared/tracing"
"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
)
var (
testTLSServerConfig = quicpogs.GenerateTLSConfig()
testQUICConfig = &quic.Config{
KeepAlive: true,
EnableDatagrams: true,
ConnectionIDLength: 16,
KeepAlivePeriod: 5 * time.Second,
EnableDatagrams: true,
}
)
var _ ReadWriteAcker = (*streamReadWriteAcker)(nil)
// TestQUICServer tests if a quic server accepts and responds to a quic client with the acceptance protocol.
// It also serves as a demonstration for communication with the QUIC connection started by a cloudflared.
func TestQUICServer(t *testing.T) {
@ -162,7 +168,7 @@ type fakeControlStream struct {
ControlStreamHandler
}
func (fakeControlStream) ServeControlStream(ctx context.Context, rw io.ReadWriteCloser, connOptions *tunnelpogs.ConnectionOptions) error {
func (fakeControlStream) ServeControlStream(ctx context.Context, rw io.ReadWriteCloser, connOptions *tunnelpogs.ConnectionOptions, tunnelConfigGetter TunnelConfigJSONGetter) error {
<-ctx.Done()
return nil
}
@ -219,9 +225,10 @@ func quicServer(
type mockOriginProxyWithRequest struct{}
func (moc *mockOriginProxyWithRequest) ProxyHTTP(w ResponseWriter, r *http.Request, isWebsocket bool) error {
func (moc *mockOriginProxyWithRequest) ProxyHTTP(w ResponseWriter, tr *tracing.TracedHTTPRequest, isWebsocket bool) error {
// These are a series of crude tests to ensure the headers and http related data is transferred from
// metadata.
r := tr.Request
if r.Method == "" {
return errors.New("method not sent")
}
@ -345,7 +352,7 @@ func TestBuildHTTPRequest(t *testing.T) {
},
ContentLength: 0,
Host: "cf.host",
Body: nil,
Body: http.NoBody,
},
body: io.NopCloser(&bytes.Buffer{}),
},
@ -473,18 +480,19 @@ func TestBuildHTTPRequest(t *testing.T) {
},
}
log := zerolog.Nop()
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
req, err := buildHTTPRequest(test.connectRequest, test.body)
req, err := buildHTTPRequest(context.Background(), test.connectRequest, test.body, &log)
assert.NoError(t, err)
test.req = test.req.WithContext(req.Context())
assert.Equal(t, test.req, req)
assert.Equal(t, test.req, req.Request)
})
}
}
func (moc *mockOriginProxyWithRequest) ProxyTCP(ctx context.Context, rwa ReadWriteAcker, tcpRequest *TCPRequest) error {
rwa.AckConnection()
rwa.AckConnection("")
io.Copy(rwa, rwa)
return nil
}
@ -498,9 +506,10 @@ func TestServeUDPSession(t *testing.T) {
defer udpListener.Close()
ctx, cancel := context.WithCancel(context.Background())
val := udpListener.LocalAddr()
// Establish QUIC connection with edge
edgeQUICSessionChan := make(chan quic.Session)
edgeQUICSessionChan := make(chan quic.Connection)
go func() {
earlyListener, err := quic.Listen(udpListener, testTLSServerConfig, testQUICConfig)
require.NoError(t, err)
@ -510,7 +519,7 @@ func TestServeUDPSession(t *testing.T) {
edgeQUICSessionChan <- edgeQUICSession
}()
qc := testQUICConnection(udpListener.LocalAddr(), t)
qc := testQUICConnection(val, t)
go qc.Serve(ctx)
edgeQUICSession := <-edgeQUICSessionChan
@ -520,7 +529,45 @@ func TestServeUDPSession(t *testing.T) {
cancel()
}
func serveSession(ctx context.Context, qc *QUICConnection, edgeQUICSession quic.Session, closeType closeReason, expectedReason string, t *testing.T) {
func TestNopCloserReadWriterCloseBeforeEOF(t *testing.T) {
readerWriter := nopCloserReadWriter{ReadWriteCloser: &mockReaderNoopWriter{Reader: strings.NewReader("123456789")}}
buffer := make([]byte, 5)
n, err := readerWriter.Read(buffer)
require.NoError(t, err)
require.Equal(t, n, 5)
// close
require.NoError(t, readerWriter.Close())
// read should get error
n, err = readerWriter.Read(buffer)
require.Equal(t, n, 0)
require.Equal(t, err, fmt.Errorf("closed by handler"))
}
func TestNopCloserReadWriterCloseAfterEOF(t *testing.T) {
readerWriter := nopCloserReadWriter{ReadWriteCloser: &mockReaderNoopWriter{Reader: strings.NewReader("123456789")}}
buffer := make([]byte, 20)
n, err := readerWriter.Read(buffer)
require.NoError(t, err)
require.Equal(t, n, 9)
// force another read to read eof
_, err = readerWriter.Read(buffer)
require.Equal(t, err, io.EOF)
// close
require.NoError(t, readerWriter.Close())
// read should get EOF still
n, err = readerWriter.Read(buffer)
require.Equal(t, n, 0)
require.Equal(t, err, io.EOF)
}
func serveSession(ctx context.Context, qc *QUICConnection, edgeQUICSession quic.Connection, closeType closeReason, expectedReason string, t *testing.T) {
var (
payload = []byte(t.Name())
)
@ -536,8 +583,12 @@ func serveSession(ctx context.Context, qc *QUICConnection, edgeQUICSession quic.
close(sessionDone)
}()
// Send a message to the quic session on edge side, it should be deumx to this datagram session
muxedPayload := append(payload, sessionID[:]...)
// Send a message to the quic session on edge side, it should be demuxed to this datagram v2 session
muxedPayload, err := quicpogs.SuffixSessionID(sessionID, payload)
require.NoError(t, err)
muxedPayload, err = quicpogs.SuffixType(muxedPayload, quicpogs.DatagramTypeUDP)
require.NoError(t, err)
err = edgeQUICSession.SendMessage(muxedPayload)
require.NoError(t, err)
@ -581,7 +632,7 @@ const (
closedByTimeout
)
func runRPCServer(ctx context.Context, session quic.Session, sessionRPCServer tunnelpogs.SessionManager, configRPCServer tunnelpogs.ConfigurationManager, t *testing.T) {
func runRPCServer(ctx context.Context, session quic.Connection, sessionRPCServer tunnelpogs.SessionManager, configRPCServer tunnelpogs.ConfigurationManager, t *testing.T) {
stream, err := session.AcceptStream(ctx)
require.NoError(t, err)
@ -606,8 +657,8 @@ type mockSessionRPCServer struct {
calledUnregisterChan chan struct{}
}
func (s mockSessionRPCServer) RegisterUdpSession(ctx context.Context, sessionID uuid.UUID, dstIP net.IP, dstPort uint16, closeIdleAfter time.Duration) error {
return fmt.Errorf("mockSessionRPCServer doesn't implement RegisterUdpSession")
func (s mockSessionRPCServer) RegisterUdpSession(ctx context.Context, sessionID uuid.UUID, dstIP net.IP, dstPort uint16, closeIdleAfter time.Duration, traceContext string) (*pogs.RegisterUdpSessionResponse, error) {
return nil, fmt.Errorf("mockSessionRPCServer doesn't implement RegisterUdpSession")
}
func (s mockSessionRPCServer) UnregisterUdpSession(ctx context.Context, sessionID uuid.UUID, reason string) error {
@ -636,7 +687,20 @@ func testQUICConnection(udpListenerAddr net.Addr, t *testing.T) *QUICConnection
&tunnelpogs.ConnectionOptions{},
fakeControlStream{},
&log,
nil,
)
require.NoError(t, err)
return qc
}
type mockReaderNoopWriter struct {
io.Reader
}
func (m *mockReaderNoopWriter) Write(p []byte) (n int, err error) {
return len(p), nil
}
func (m *mockReaderNoopWriter) Close() error {
return nil
}

View File

@ -4,6 +4,7 @@ import (
"context"
"fmt"
"io"
"net"
"time"
"github.com/rs/zerolog"
@ -57,6 +58,12 @@ type NamedTunnelRPCClient interface {
config *NamedTunnelProperties,
options *tunnelpogs.ConnectionOptions,
connIndex uint8,
edgeAddress net.IP,
observer *Observer,
) (*tunnelpogs.ConnectionDetails, error)
SendLocalConfiguration(
c context.Context,
config []byte,
observer *Observer,
) error
GracefulShutdown(ctx context.Context, gracePeriod time.Duration)
@ -89,8 +96,9 @@ func (rsc *registrationServerClient) RegisterConnection(
properties *NamedTunnelProperties,
options *tunnelpogs.ConnectionOptions,
connIndex uint8,
edgeAddress net.IP,
observer *Observer,
) error {
) (*tunnelpogs.ConnectionDetails, error) {
conn, err := rsc.client.RegisterConnection(
ctx,
properties.Credentials.Auth(),
@ -101,18 +109,26 @@ func (rsc *registrationServerClient) RegisterConnection(
if err != nil {
if err.Error() == DuplicateConnectionError {
observer.metrics.regFail.WithLabelValues("dup_edge_conn", "registerConnection").Inc()
return errDuplicationConnection
return nil, errDuplicationConnection
}
observer.metrics.regFail.WithLabelValues("server_error", "registerConnection").Inc()
return serverRegistrationErrorFromRPC(err)
return nil, serverRegistrationErrorFromRPC(err)
}
observer.metrics.regSuccess.WithLabelValues("registerConnection").Inc()
observer.logServerInfo(connIndex, conn.Location, fmt.Sprintf("Connection %s registered", conn.UUID))
observer.sendConnectedEvent(connIndex, conn.Location)
return conn, nil
}
return nil
func (rsc *registrationServerClient) SendLocalConfiguration(ctx context.Context, config []byte, observer *Observer) (err error) {
observer.metrics.localConfigMetrics.pushes.Inc()
defer func() {
if err != nil {
observer.metrics.localConfigMetrics.pushesErrors.Inc()
}
}()
return rsc.client.SendLocalConfiguration(ctx, config)
}
func (rsc *registrationServerClient) GracefulShutdown(ctx context.Context, gracePeriod time.Duration) {
@ -258,7 +274,7 @@ func (h *h2muxConnection) logServerInfo(ctx context.Context, rpcClient *tunnelSe
h.observer.log.Err(err).Msg("Failed to retrieve server information")
return err
}
h.observer.logServerInfo(h.connIndex, serverInfo.LocationName, "Connection established")
h.observer.logServerInfo(h.connIndex, serverInfo.LocationName, net.IP{}, "Connection established")
return nil
}
@ -274,9 +290,13 @@ func (h *h2muxConnection) registerNamedTunnel(
rpcClient := h.newRPCClientFunc(ctx, stream, h.observer.log)
defer rpcClient.Close()
if err = rpcClient.RegisterConnection(ctx, namedTunnel, connOptions, h.connIndex, h.observer); err != nil {
registrationDetails, err := rpcClient.RegisterConnection(ctx, namedTunnel, connOptions, h.connIndex, nil, h.observer)
if err != nil {
return err
}
h.observer.logServerInfo(h.connIndex, registrationDetails.Location, nil, fmt.Sprintf("Connection %s registered", registrationDetails.UUID))
h.observer.sendConnectedEvent(h.connIndex, H2mux, registrationDetails.Location)
return nil
}

View File

@ -42,9 +42,3 @@ func (sc *errClosedSession) Error() string {
return fmt.Sprintf("session closed by local due to %s", sc.message)
}
}
// newDatagram is an event when transport receives new datagram
type newDatagram struct {
sessionID uuid.UUID
payload []byte
}

View File

@ -2,16 +2,23 @@ package datagramsession
import (
"context"
"fmt"
"io"
"time"
"github.com/google/uuid"
"github.com/lucas-clemente/quic-go"
"github.com/rs/zerolog"
"golang.org/x/sync/errgroup"
"github.com/cloudflare/cloudflared/packet"
)
const (
requestChanCapacity = 16
defaultReqTimeout = time.Second * 5
)
var (
errSessionManagerClosed = fmt.Errorf("session manager closed")
)
// Manager defines the APIs to manage sessions from the same transport.
@ -22,90 +29,114 @@ type Manager interface {
RegisterSession(ctx context.Context, sessionID uuid.UUID, dstConn io.ReadWriteCloser) (*Session, error)
// UnregisterSession stops tracking the session and terminates it
UnregisterSession(ctx context.Context, sessionID uuid.UUID, message string, byRemote bool) error
// UpdateLogger updates the logger used by the Manager
UpdateLogger(log *zerolog.Logger)
}
type manager struct {
registrationChan chan *registerSessionEvent
unregistrationChan chan *unregisterSessionEvent
datagramChan chan *newDatagram
transport transport
sendFunc transportSender
receiveChan <-chan *packet.Session
closedChan <-chan struct{}
sessions map[uuid.UUID]*Session
log *zerolog.Logger
// timeout waiting for an API to finish. This can be overriden in test
timeout time.Duration
}
func NewManager(transport transport, log *zerolog.Logger) Manager {
func NewManager(log *zerolog.Logger, sendF transportSender, receiveChan <-chan *packet.Session) *manager {
return &manager{
registrationChan: make(chan *registerSessionEvent),
unregistrationChan: make(chan *unregisterSessionEvent),
// datagramChan is buffered, so it can read more datagrams from transport while the event loop is processing other events
datagramChan: make(chan *newDatagram, requestChanCapacity),
transport: transport,
sessions: make(map[uuid.UUID]*Session),
log: log,
sendFunc: sendF,
receiveChan: receiveChan,
closedChan: make(chan struct{}),
sessions: make(map[uuid.UUID]*Session),
log: log,
timeout: defaultReqTimeout,
}
}
func (m *manager) UpdateLogger(log *zerolog.Logger) {
// Benign data race, no problem if the old pointer is read or not concurrently.
m.log = log
}
func (m *manager) Serve(ctx context.Context) error {
errGroup, ctx := errgroup.WithContext(ctx)
errGroup.Go(func() error {
for {
sessionID, payload, err := m.transport.ReceiveFrom()
if err != nil {
if aerr, ok := err.(*quic.ApplicationError); ok && uint64(aerr.ErrorCode) == uint64(quic.NoError) {
return nil
} else {
return err
}
}
datagram := &newDatagram{
sessionID: sessionID,
payload: payload,
}
select {
case <-ctx.Done():
return ctx.Err()
// Only the event loop routine can update/lookup the sessions map to avoid concurrent access
// Send the datagram to the event loop. It will find the session to send to
case m.datagramChan <- datagram:
}
for {
select {
case <-ctx.Done():
m.shutdownSessions(ctx.Err())
return ctx.Err()
// receiveChan is buffered, so the transport side can keep reading datagrams while the event loop is
// processing other events
case datagram := <-m.receiveChan:
m.sendToSession(datagram)
case registration := <-m.registrationChan:
m.registerSession(ctx, registration)
case unregistration := <-m.unregistrationChan:
m.unregisterSession(unregistration)
}
})
errGroup.Go(func() error {
for {
select {
case <-ctx.Done():
return nil
case datagram := <-m.datagramChan:
m.sendToSession(datagram)
case registration := <-m.registrationChan:
m.registerSession(ctx, registration)
// TODO: TUN-5422: Unregister inactive session upon timeout
case unregistration := <-m.unregistrationChan:
m.unregisterSession(unregistration)
}
}
})
return errGroup.Wait()
}
}
func (m *manager) shutdownSessions(err error) {
if err == nil {
err = errSessionManagerClosed
}
closeSessionErr := &errClosedSession{
message: err.Error(),
// Usually connection with remote has been closed, so set this to true to skip unregistering from remote
byRemote: true,
}
for _, s := range m.sessions {
s.close(closeSessionErr)
}
}
func (m *manager) RegisterSession(ctx context.Context, sessionID uuid.UUID, originProxy io.ReadWriteCloser) (*Session, error) {
ctx, cancel := context.WithTimeout(ctx, m.timeout)
defer cancel()
event := newRegisterSessionEvent(sessionID, originProxy)
select {
case <-ctx.Done():
m.log.Error().Msg("Datagram session registration timeout")
return nil, ctx.Err()
case m.registrationChan <- event:
session := <-event.resultChan
return session, nil
// Once closedChan is closed, the manager won't accept more registrations because nothing is
// reading from registrationChan and it's an unbuffered channel
case <-m.closedChan:
return nil, errSessionManagerClosed
}
}
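
The registration path above funnels all map mutations through the single Serve goroutine, with a per-call timeout and a closedChan guard so callers don't hang once the loop exits. A condensed, generic version of that pattern (all names are illustrative):

package main

import (
    "context"
    "errors"
    "fmt"
    "time"
)

type registration struct {
    id     string
    result chan struct{}
}

type loop struct {
    regChan    chan registration
    closedChan chan struct{}
    items      map[string]struct{}
    timeout    time.Duration
}

func newLoop() *loop {
    return &loop{
        regChan:    make(chan registration),
        closedChan: make(chan struct{}),
        items:      make(map[string]struct{}),
        timeout:    50 * time.Millisecond,
    }
}

func (l *loop) Serve(ctx context.Context) error {
    defer close(l.closedChan)
    for {
        select {
        case <-ctx.Done():
            return ctx.Err()
        case reg := <-l.regChan:
            // Only this goroutine touches the items map, so no lock is needed.
            l.items[reg.id] = struct{}{}
            close(reg.result)
        }
    }
}

func (l *loop) Register(ctx context.Context, id string) error {
    ctx, cancel := context.WithTimeout(ctx, l.timeout)
    defer cancel()
    reg := registration{id: id, result: make(chan struct{})}
    select {
    case <-ctx.Done():
        return ctx.Err() // event loop busy or not running
    case l.regChan <- reg:
        <-reg.result
        return nil
    case <-l.closedChan:
        return errors.New("loop closed")
    }
}

func main() {
    l := newLoop()
    ctx, cancel := context.WithCancel(context.Background())
    go l.Serve(ctx)
    fmt.Println(l.Register(ctx, "session-1")) // <nil>
    cancel()
}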
func (m *manager) registerSession(ctx context.Context, registration *registerSessionEvent) {
session := newSession(registration.sessionID, m.transport, registration.originProxy, m.log)
session := m.newSession(registration.sessionID, registration.originProxy)
m.sessions[registration.sessionID] = session
registration.resultChan <- session
}
func (m *manager) newSession(id uuid.UUID, dstConn io.ReadWriteCloser) *Session {
logger := m.log.With().Str("sessionID", id.String()).Logger()
return &Session{
ID: id,
sendFunc: m.sendFunc,
dstConn: dstConn,
// activeAtChan has low capacity. It can be full when there are many concurrent read/write. markActive() will
// drop instead of blocking because last active time only needs to be an approximation
activeAtChan: make(chan time.Time, 2),
// capacity is 2 because close() and dstToTransport routine in Serve() can write to this channel
closeChan: make(chan error, 2),
log: &logger,
}
}
func (m *manager) UnregisterSession(ctx context.Context, sessionID uuid.UUID, message string, byRemote bool) error {
ctx, cancel := context.WithTimeout(ctx, m.timeout)
defer cancel()
event := &unregisterSessionEvent{
sessionID: sessionID,
err: &errClosedSession{
@ -115,9 +146,12 @@ func (m *manager) UnregisterSession(ctx context.Context, sessionID uuid.UUID, me
}
select {
case <-ctx.Done():
m.log.Error().Msg("Datagram session unregistration timeout")
return ctx.Err()
case m.unregistrationChan <- event:
return nil
case <-m.closedChan:
return errSessionManagerClosed
}
}
@ -129,16 +163,13 @@ func (m *manager) unregisterSession(unregistration *unregisterSessionEvent) {
}
}
func (m *manager) sendToSession(datagram *newDatagram) {
session, ok := m.sessions[datagram.sessionID]
func (m *manager) sendToSession(datagram *packet.Session) {
session, ok := m.sessions[datagram.ID]
if !ok {
m.log.Error().Str("sessionID", datagram.sessionID.String()).Msg("session not found")
m.log.Error().Str("sessionID", datagram.ID.String()).Msg("session not found")
return
}
// session writes to destination over a connected UDP socket, which should not be blocking, so this call doesn't
// need to run in another go routine
_, err := session.transportToDst(datagram.payload)
if err != nil {
m.log.Err(err).Str("sessionID", datagram.sessionID.String()).Msg("Failed to write payload to session")
}
session.transportToDst(datagram.Payload)
}

View File

@ -6,6 +6,7 @@ import (
"fmt"
"io"
"net"
"sync"
"testing"
"time"
@ -13,26 +14,30 @@ import (
"github.com/rs/zerolog"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"github.com/cloudflare/cloudflared/packet"
)
var (
nopLogger = zerolog.Nop()
)
func TestManagerServe(t *testing.T) {
const (
sessions = 20
msgs = 50
sessions = 2
msgs = 5
remoteUnregisterMsg = "eyeball closed connection"
)
log := zerolog.Nop()
transport := &mockQUICTransport{
reqChan: newDatagramChannel(1),
respChan: newDatagramChannel(1),
}
mg := NewManager(transport, &log)
eyeballTracker := make(map[uuid.UUID]*datagramChannel)
for i := 0; i < sessions; i++ {
sessionID := uuid.New()
eyeballTracker[sessionID] = newDatagramChannel(1)
requestChan := make(chan *packet.Session)
transport := mockQUICTransport{
sessions: make(map[uuid.UUID]chan []byte),
}
for i := 0; i < sessions; i++ {
transport.sessions[uuid.New()] = make(chan []byte)
}
mg := NewManager(&nopLogger, transport.MuxSession, requestChan)
ctx, cancel := context.WithCancel(context.Background())
serveDone := make(chan struct{})
@ -41,53 +46,42 @@ func TestManagerServe(t *testing.T) {
close(serveDone)
}(ctx)
go func(ctx context.Context) {
for {
sessionID, payload, err := transport.respChan.Receive(ctx)
if err != nil {
require.Equal(t, context.Canceled, err)
return
}
respChan := eyeballTracker[sessionID]
require.NoError(t, respChan.Send(ctx, sessionID, payload))
}
}(ctx)
errGroup, ctx := errgroup.WithContext(ctx)
for sID, receiver := range eyeballTracker {
for sessionID, eyeballRespChan := range transport.sessions {
// Assign loop variables to local variables
sID := sessionID
payload := testPayload(sID)
expectResp := testResponse(payload)
cfdConn, originConn := net.Pipe()
origin := mockOrigin{
expectMsgCount: msgs,
expectedMsg: payload,
expectedResp: expectResp,
conn: originConn,
}
eyeball := mockEyeballSession{
id: sID,
expectedMsgCount: msgs,
expectedMsg: payload,
expectedResponse: expectResp,
respReceiver: eyeballRespChan,
}
// Assign loop variables to local variables
sessionID := sID
eyeballRespReceiver := receiver
errGroup.Go(func() error {
payload := testPayload(sessionID)
expectResp := testResponse(payload)
cfdConn, originConn := net.Pipe()
origin := mockOrigin{
expectMsgCount: msgs,
expectedMsg: payload,
expectedResp: expectResp,
conn: originConn,
}
eyeball := mockEyeball{
expectMsgCount: msgs,
expectedMsg: expectResp,
expectSessionID: sessionID,
respReceiver: eyeballRespReceiver,
}
session, err := mg.RegisterSession(ctx, sID, cfdConn)
require.NoError(t, err)
reqErrGroup, reqCtx := errgroup.WithContext(ctx)
reqErrGroup.Go(func() error {
return origin.serve()
})
reqErrGroup.Go(func() error {
return eyeball.serve(reqCtx)
return eyeball.serve(reqCtx, requestChan)
})
session, err := mg.RegisterSession(ctx, sessionID, cfdConn)
require.NoError(t, err)
sessionDone := make(chan struct{})
go func() {
closedByRemote, err := session.Serve(ctx, time.Minute*2)
@ -100,26 +94,112 @@ func TestManagerServe(t *testing.T) {
close(sessionDone)
}()
for i := 0; i < msgs; i++ {
require.NoError(t, transport.newRequest(ctx, sessionID, testPayload(sessionID)))
}
// Make sure eyeball and origin have received all messages before unregistering the session
require.NoError(t, reqErrGroup.Wait())
require.NoError(t, mg.UnregisterSession(ctx, sessionID, remoteUnregisterMsg, true))
require.NoError(t, mg.UnregisterSession(ctx, sID, remoteUnregisterMsg, true))
<-sessionDone
return nil
})
}
require.NoError(t, errGroup.Wait())
cancel()
transport.close()
<-serveDone
}
func TestTimeout(t *testing.T) {
const (
testTimeout = time.Millisecond * 50
)
mg := NewManager(&nopLogger, nil, nil)
mg.timeout = testTimeout
ctx := context.Background()
sessionID := uuid.New()
// session manager is not running, so the event loop is not running and therefore calls to the APIs should time out
session, err := mg.RegisterSession(ctx, sessionID, nil)
require.ErrorIs(t, err, context.DeadlineExceeded)
require.Nil(t, session)
err = mg.UnregisterSession(ctx, sessionID, "session gone", true)
require.ErrorIs(t, err, context.DeadlineExceeded)
}
func TestUnregisterSessionCloseSession(t *testing.T) {
sessionID := uuid.New()
payload := []byte(t.Name())
sender := newMockTransportSender(sessionID, payload)
mg := NewManager(&nopLogger, sender.muxSession, nil)
ctx, cancel := context.WithCancel(context.Background())
managerDone := make(chan struct{})
go func() {
err := mg.Serve(ctx)
require.Error(t, err)
close(managerDone)
}()
cfdConn, originConn := net.Pipe()
session, err := mg.RegisterSession(ctx, sessionID, cfdConn)
require.NoError(t, err)
require.NotNil(t, session)
unregisteredChan := make(chan struct{})
go func() {
_, err := originConn.Write(payload)
require.NoError(t, err)
err = mg.UnregisterSession(ctx, sessionID, "eyeball closed session", true)
require.NoError(t, err)
close(unregisteredChan)
}()
closedByRemote, err := session.Serve(ctx, time.Minute)
require.True(t, closedByRemote)
require.Error(t, err)
<-unregisteredChan
cancel()
<-managerDone
}
func TestManagerCtxDoneCloseSessions(t *testing.T) {
sessionID := uuid.New()
payload := []byte(t.Name())
sender := newMockTransportSender(sessionID, payload)
mg := NewManager(&nopLogger, sender.muxSession, nil)
ctx, cancel := context.WithCancel(context.Background())
var wg sync.WaitGroup
wg.Add(1)
go func() {
defer wg.Done()
err := mg.Serve(ctx)
require.Error(t, err)
}()
cfdConn, originConn := net.Pipe()
session, err := mg.RegisterSession(ctx, sessionID, cfdConn)
require.NoError(t, err)
require.NotNil(t, session)
wg.Add(1)
go func() {
defer wg.Done()
_, err := originConn.Write(payload)
require.NoError(t, err)
cancel()
}()
closedByRemote, err := session.Serve(ctx, time.Minute)
require.False(t, closedByRemote)
require.Error(t, err)
wg.Wait()
}
type mockOrigin struct {
expectMsgCount int
expectedMsg []byte
@ -141,7 +221,6 @@ func (mo *mockOrigin) serve() error {
if !bytes.Equal(readBuffer[:n], mo.expectedMsg) {
return fmt.Errorf("Expect %v, read %v", mo.expectedMsg, readBuffer[:n])
}
_, err = mo.conn.Write(mo.expectedResp)
if err != nil {
return err
@ -158,65 +237,35 @@ func testResponse(msg []byte) []byte {
return []byte(fmt.Sprintf("Response to %v", msg))
}
type mockEyeball struct {
expectMsgCount int
expectedMsg []byte
expectSessionID uuid.UUID
respReceiver *datagramChannel
type mockQUICTransport struct {
sessions map[uuid.UUID]chan []byte
}
func (me *mockEyeball) serve(ctx context.Context) error {
for i := 0; i < me.expectMsgCount; i++ {
sessionID, msg, err := me.respReceiver.Receive(ctx)
if err != nil {
return err
}
if sessionID != me.expectSessionID {
return fmt.Errorf("Expect session %s, got %s", me.expectSessionID, sessionID)
}
if !bytes.Equal(msg, me.expectedMsg) {
return fmt.Errorf("Expect %v, read %v", me.expectedMsg, msg)
}
}
func (me *mockQUICTransport) MuxSession(session *packet.Session) error {
s := me.sessions[session.ID]
s <- session.Payload
return nil
}
// datagramChannel is a channel for Datagram with wrapper to send/receive with context
type datagramChannel struct {
datagramChan chan *newDatagram
closedChan chan struct{}
type mockEyeballSession struct {
id uuid.UUID
expectedMsgCount int
expectedMsg []byte
expectedResponse []byte
respReceiver <-chan []byte
}
func newDatagramChannel(capacity uint) *datagramChannel {
return &datagramChannel{
datagramChan: make(chan *newDatagram, capacity),
closedChan: make(chan struct{}),
func (me *mockEyeballSession) serve(ctx context.Context, requestChan chan *packet.Session) error {
for i := 0; i < me.expectedMsgCount; i++ {
requestChan <- &packet.Session{
ID: me.id,
Payload: me.expectedMsg,
}
resp := <-me.respReceiver
if !bytes.Equal(resp, me.expectedResponse) {
return fmt.Errorf("Expect %v, read %v", me.expectedResponse, resp)
}
fmt.Println("Resp", resp)
}
}
func (rc *datagramChannel) Send(ctx context.Context, sessionID uuid.UUID, payload []byte) error {
select {
case <-ctx.Done():
return ctx.Err()
case <-rc.closedChan:
return fmt.Errorf("datagram channel closed")
case rc.datagramChan <- &newDatagram{sessionID: sessionID, payload: payload}:
return nil
}
}
func (rc *datagramChannel) Receive(ctx context.Context) (uuid.UUID, []byte, error) {
select {
case <-ctx.Done():
return uuid.Nil, nil, ctx.Err()
case <-rc.closedChan:
return uuid.Nil, nil, fmt.Errorf("datagram channel closed")
case msg := <-rc.datagramChan:
return msg.sessionID, msg.payload, nil
}
}
func (rc *datagramChannel) Close() {
// No need to close msgChan, it will be garbage collect once there is no reference to it
close(rc.closedChan)
return nil
}

View File

@ -2,12 +2,16 @@ package datagramsession
import (
"context"
"errors"
"fmt"
"io"
"net"
"time"
"github.com/google/uuid"
"github.com/rs/zerolog"
"github.com/cloudflare/cloudflared/packet"
)
const (
@ -18,41 +22,33 @@ func SessionIdleErr(timeout time.Duration) error {
return fmt.Errorf("session idle for %v", timeout)
}
type transportSender func(session *packet.Session) error
// ErrVithVariableSeverity is implemented by errors that have variable severity
type ErrVithVariableSeverity interface {
error
// LogLevel return the severity of this error
LogLevel() zerolog.Level
}
// Session is a bidirectional pipe of datagrams between transport and dstConn
// Currently the only implementation of transport is quic DatagramMuxer
// Destination can be a connection with origin or with eyeball
// When the destination is origin:
// - Datagrams from edge are read by Manager from the transport. Manager finds the corresponding Session and calls the
// write method of the Session to send to origin
// - Datagrams from origin are read from conn and SentTo transport. Transport will return them to eyeball
// - Manager receives datagrams from receiveChan and calls the transportToDst method of the Session to send to origin
// - Datagrams from origin are read from conn and sent to the transport using the transportSender callback. Transport will return them to eyeball
// When the destination is eyeball:
// - Datagrams from eyeball are read from conn and SentTo transport. Transport will send them to cloudflared
// - Datagrams from cloudflared are read by Manager from the transport. Manager finds the corresponding Session and calls the
// write method of the Session to send to eyeball
// - Datagrams from eyeball are read from conn and sent to the transport. Transport will send them to cloudflared using the transportSender callback.
// - Manager receives datagrams from receiveChan and calls the transportToDst method of the Session to send to the eyeball
type Session struct {
ID uuid.UUID
transport transport
dstConn io.ReadWriteCloser
ID uuid.UUID
sendFunc transportSender
dstConn io.ReadWriteCloser
// activeAtChan is used to communicate the last read/write time
activeAtChan chan time.Time
closeChan chan error
log *zerolog.Logger
}
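To make the two data directions described above concrete, a rough usage sketch (assumptions only, with names taken from the surrounding code) of how a caller drives a Session:
// Register the session with the manager, handing it the destination connection.
session, err := manager.RegisterSession(ctx, sessionID, dstConn)
if err != nil {
	return err
}
// Serve pumps dstConn reads into sendFunc (towards the transport) while the
// manager delivers transport datagrams to dstConn via transportToDst.
closedByRemote, err := session.Serve(ctx, 5*time.Minute)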
func newSession(id uuid.UUID, transport transport, dstConn io.ReadWriteCloser, log *zerolog.Logger) *Session {
return &Session{
ID: id,
transport: transport,
dstConn: dstConn,
// activeAtChan has low capacity. It can be full when there are many concurrent read/write. markActive() will
// drop instead of blocking because last active time only needs to be an approximation
activeAtChan: make(chan time.Time, 2),
// capacity is 2 because close() and dstToTransport routine in Serve() can write to this channel
closeChan: make(chan error, 2),
log: log,
}
}
func (s *Session) Serve(ctx context.Context, closeAfterIdle time.Duration) (closedByRemote bool, err error) {
go func() {
// QUIC implementation copies data to another buffer before returning https://github.com/lucas-clemente/quic-go/blob/v0.24.0/session.go#L1967-L1975
@ -60,9 +56,20 @@ func (s *Session) Serve(ctx context.Context, closeAfterIdle time.Duration) (clos
const maxPacketSize = 1500
readBuffer := make([]byte, maxPacketSize)
for {
if err := s.dstToTransport(readBuffer); err != nil {
s.closeChan <- err
return
if closeSession, err := s.dstToTransport(readBuffer); err != nil {
if errors.Is(err, net.ErrClosed) {
s.log.Debug().Msg("Destination connection closed")
} else {
level := zerolog.ErrorLevel
if variableErr, ok := err.(ErrVithVariableSeverity); ok {
level = variableErr.LogLevel()
}
s.log.WithLevel(level).Err(err).Msg("Failed to send session payload from destination to transport")
}
if closeSession {
s.closeChan <- err
return
}
}
}
}()
@ -103,32 +110,29 @@ func (s *Session) waitForCloseCondition(ctx context.Context, closeAfterIdle time
}
}
func (s *Session) dstToTransport(buffer []byte) error {
func (s *Session) dstToTransport(buffer []byte) (closeSession bool, err error) {
n, err := s.dstConn.Read(buffer)
s.markActive()
// https://pkg.go.dev/io#Reader suggests caller should always process n > 0 bytes
if n > 0 {
if n <= int(s.transport.MTU()) {
err = s.transport.SendTo(s.ID, buffer[:n])
} else {
// drop packet for now, eventually reply with ICMP for PMTUD
s.log.Debug().
Str("session", s.ID.String()).
Int("len", n).
Int("mtu", s.transport.MTU()).
Msg("dropped packet exceeding MTU")
if n > 0 || err == nil {
session := packet.Session{
ID: s.ID,
Payload: buffer[:n],
}
if sendErr := s.sendFunc(&session); sendErr != nil {
return false, sendErr
}
}
// Some UDP application might send 0-size payload.
if err == nil && n == 0 {
err = s.transport.SendTo(s.ID, []byte{})
}
return err
return err != nil, err
}
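The n > 0 branch above follows the io.Reader contract: a single Read may return both data and a non-nil error, so the payload must be forwarded before the error is handled. A generic sketch of that contract (forward is a placeholder, not part of this diff):
n, err := r.Read(buf)
if n > 0 {
	// Consume the n bytes first, even when err != nil.
	forward(buf[:n])
}
if err != nil {
	return err // e.g. io.EOF or a transient read error
}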
func (s *Session) transportToDst(payload []byte) (int, error) {
s.markActive()
return s.dstConn.Write(payload)
n, err := s.dstConn.Write(payload)
if err != nil {
s.log.Err(err).Msg("Failed to write payload to session")
}
return n, err
}
// Sends the last active time to the idle checker loop without blocking. activeAtChan will only be full when there

View File

@ -14,6 +14,8 @@ import (
"github.com/rs/zerolog"
"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
"github.com/cloudflare/cloudflared/packet"
)
// TestCloseSession makes sure a session will stop after context is done
@ -41,12 +43,10 @@ func testSessionReturns(t *testing.T, closeBy closeMethod, closeAfterIdle time.D
sessionID := uuid.New()
cfdConn, originConn := net.Pipe()
payload := testPayload(sessionID)
transport := &mockQUICTransport{
reqChan: newDatagramChannel(1),
respChan: newDatagramChannel(1),
}
log := zerolog.Nop()
session := newSession(sessionID, transport, cfdConn, &log)
mg := NewManager(&log, nil, nil)
session := mg.newSession(sessionID, cfdConn)
ctx, cancel := context.WithCancel(context.Background())
sessionDone := make(chan struct{})
@ -117,12 +117,11 @@ func testActiveSessionNotClosed(t *testing.T, readFromDst bool, writeToDst bool)
sessionID := uuid.New()
cfdConn, originConn := net.Pipe()
payload := testPayload(sessionID)
transport := &mockQUICTransport{
reqChan: newDatagramChannel(100),
respChan: newDatagramChannel(100),
}
log := zerolog.Nop()
session := newSession(sessionID, transport, cfdConn, &log)
respChan := make(chan *packet.Session)
sender := newMockTransportSender(sessionID, payload)
mg := NewManager(&nopLogger, sender.muxSession, respChan)
session := mg.newSession(sessionID, cfdConn)
startTime := time.Now()
activeUntil := startTime.Add(activeTime)
@ -184,7 +183,8 @@ func testActiveSessionNotClosed(t *testing.T, readFromDst bool, writeToDst bool)
func TestMarkActiveNotBlocking(t *testing.T) {
const concurrentCalls = 50
session := newSession(uuid.New(), nil, nil, nil)
mg := NewManager(&nopLogger, nil, nil)
session := mg.newSession(uuid.New(), nil)
var wg sync.WaitGroup
wg.Add(concurrentCalls)
for i := 0; i < concurrentCalls; i++ {
@ -196,15 +196,17 @@ func TestMarkActiveNotBlocking(t *testing.T) {
wg.Wait()
}
// Some UDP applications might send 0-size payloads.
func TestZeroBytePayload(t *testing.T) {
sessionID := uuid.New()
cfdConn, originConn := net.Pipe()
transport := &mockQUICTransport{
reqChan: newDatagramChannel(1),
respChan: newDatagramChannel(1),
sender := sendOnceTransportSender{
baseSender: newMockTransportSender(sessionID, make([]byte, 0)),
sentChan: make(chan struct{}),
}
log := zerolog.Nop()
session := newSession(sessionID, transport, cfdConn, &log)
mg := NewManager(&nopLogger, sender.muxSession, nil)
session := mg.newSession(sessionID, cfdConn)
ctx, cancel := context.WithCancel(context.Background())
errGroup, ctx := errgroup.WithContext(ctx)
@ -224,11 +226,39 @@ func TestZeroBytePayload(t *testing.T) {
return nil
})
receivedSessionID, payload, err := transport.respChan.Receive(ctx)
require.NoError(t, err)
require.Len(t, payload, 0)
require.Equal(t, sessionID, receivedSessionID)
<-sender.sentChan
cancel()
require.NoError(t, errGroup.Wait())
}
type mockTransportSender struct {
expectedSessionID uuid.UUID
expectedPayload []byte
}
func newMockTransportSender(expectedSessionID uuid.UUID, expectedPayload []byte) *mockTransportSender {
return &mockTransportSender{
expectedSessionID: expectedSessionID,
expectedPayload: expectedPayload,
}
}
func (mts *mockTransportSender) muxSession(session *packet.Session) error {
if session.ID != mts.expectedSessionID {
return fmt.Errorf("Expect session %s, got %s", mts.expectedSessionID, session.ID)
}
if !bytes.Equal(session.Payload, mts.expectedPayload) {
return fmt.Errorf("Expect %v, read %v", mts.expectedPayload, session.Payload)
}
return nil
}
type sendOnceTransportSender struct {
baseSender *mockTransportSender
sentChan chan struct{}
}
func (sots *sendOnceTransportSender) muxSession(session *packet.Session) error {
defer close(sots.sentChan)
return sots.baseSender.muxSession(session)
}

View File

@ -1,13 +0,0 @@
package datagramsession
import "github.com/google/uuid"
// Transport is a connection between cloudflared and edge that can multiplex datagrams from multiple sessions
type transport interface {
// SendTo writes payload for a session to the transport
SendTo(sessionID uuid.UUID, payload []byte) error
// ReceiveFrom reads the next datagram from the transport
ReceiveFrom() (uuid.UUID, []byte, error)
// Max transmission unit to receive from the transport
MTU() int
}

View File

@ -1,36 +0,0 @@
package datagramsession
import (
"context"
"github.com/google/uuid"
)
type mockQUICTransport struct {
reqChan *datagramChannel
respChan *datagramChannel
}
func (mt *mockQUICTransport) SendTo(sessionID uuid.UUID, payload []byte) error {
buf := make([]byte, len(payload))
// The QUIC implementation copies data to another buffer before returning https://github.com/lucas-clemente/quic-go/blob/v0.24.0/session.go#L1967-L1975
copy(buf, payload)
return mt.respChan.Send(context.Background(), sessionID, buf)
}
func (mt *mockQUICTransport) ReceiveFrom() (uuid.UUID, []byte, error) {
return mt.reqChan.Receive(context.Background())
}
func (mt *mockQUICTransport) MTU() int {
return 1280
}
func (mt *mockQUICTransport) newRequest(ctx context.Context, sessionID uuid.UUID, payload []byte) error {
return mt.reqChan.Send(ctx, sessionID, payload)
}
func (mt *mockQUICTransport) close() {
mt.reqChan.Close()
mt.respChan.Close()
}

View File

@ -1,4 +1,4 @@
FROM golang:1.17.5 as builder
FROM golang:1.18 as builder
ENV GO111MODULE=on \
CGO_ENABLED=0
WORKDIR /go/src/github.com/cloudflare/cloudflared/
@ -6,4 +6,4 @@ RUN apt-get update
COPY . .
# compile cloudflared
RUN make cloudflared
RUN cp /go/src/github.com/cloudflare/cloudflared/cloudflared /usr/local/bin/
RUN cp /go/src/github.com/cloudflare/cloudflared/cloudflared /usr/local/bin/

View File

@ -0,0 +1,64 @@
package allregions
// AddrSet is a set of edge addresses, tracking which connection (if any) each address is assigned to.
type AddrSet map[*EdgeAddr]UsedBy
// AddrUsedBy finds the address used by the given connection in this region.
// Returns nil if the connection isn't using any IP.
func (a AddrSet) AddrUsedBy(connID int) *EdgeAddr {
for addr, used := range a {
if used.Used && used.ConnID == connID {
return addr
}
}
return nil
}
// AvailableAddrs counts how many unused addresses this region contains.
func (a AddrSet) AvailableAddrs() int {
n := 0
for _, usedby := range a {
if !usedby.Used {
n++
}
}
return n
}
// GetUnusedIP returns a random unused address in this region.
// Returns nil if all addresses are in use.
func (a AddrSet) GetUnusedIP(excluding *EdgeAddr) *EdgeAddr {
for addr, usedby := range a {
if !usedby.Used && addr != excluding {
return addr
}
}
return nil
}
// Use the address, assigning it to a proxy connection.
func (a AddrSet) Use(addr *EdgeAddr, connID int) {
if addr == nil {
return
}
a[addr] = InUse(connID)
}
// GetAnyAddress returns an arbitrary address from the region.
func (a AddrSet) GetAnyAddress() *EdgeAddr {
for addr := range a {
return addr
}
return nil
}
// GiveBack the address, ensuring it is no longer assigned to a proxy connection.
// Returns true if the address is in this set.
func (a AddrSet) GiveBack(addr *EdgeAddr) (ok bool) {
if _, ok := a[addr]; !ok {
return false
}
a[addr] = Unused()
return true
}

View File

@ -0,0 +1,247 @@
package allregions
import (
"reflect"
"testing"
)
func TestAddrSet_AddrUsedBy(t *testing.T) {
type args struct {
connID int
}
tests := []struct {
name string
addrSet AddrSet
args args
want *EdgeAddr
}{
{
name: "happy trivial test",
addrSet: AddrSet{
&addr0: InUse(0),
},
args: args{connID: 0},
want: &addr0,
},
{
name: "sad trivial test",
addrSet: AddrSet{
&addr0: InUse(0),
},
args: args{connID: 1},
want: nil,
},
{
name: "sad test",
addrSet: AddrSet{
&addr0: InUse(0),
&addr1: InUse(1),
&addr2: InUse(2),
},
args: args{connID: 3},
want: nil,
},
{
name: "happy test",
addrSet: AddrSet{
&addr0: InUse(0),
&addr1: InUse(1),
&addr2: InUse(2),
},
args: args{connID: 1},
want: &addr1,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.addrSet.AddrUsedBy(tt.args.connID); !reflect.DeepEqual(got, tt.want) {
t.Errorf("Region.AddrUsedBy() = %v, want %v", got, tt.want)
}
})
}
}
func TestAddrSet_AvailableAddrs(t *testing.T) {
tests := []struct {
name string
addrSet AddrSet
want int
}{
{
name: "contains addresses",
addrSet: AddrSet{
&addr0: InUse(0),
&addr1: Unused(),
&addr2: InUse(2),
},
want: 1,
},
{
name: "all free",
addrSet: AddrSet{
&addr0: Unused(),
&addr1: Unused(),
&addr2: Unused(),
},
want: 3,
},
{
name: "all used",
addrSet: AddrSet{
&addr0: InUse(0),
&addr1: InUse(1),
&addr2: InUse(2),
},
want: 0,
},
{
name: "empty",
addrSet: AddrSet{},
want: 0,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.addrSet.AvailableAddrs(); got != tt.want {
t.Errorf("Region.AvailableAddrs() = %v, want %v", got, tt.want)
}
})
}
}
func TestAddrSet_GetUnusedIP(t *testing.T) {
type args struct {
excluding *EdgeAddr
}
tests := []struct {
name string
addrSet AddrSet
args args
want *EdgeAddr
}{
{
name: "happy test with excluding set",
addrSet: AddrSet{
&addr0: Unused(),
&addr1: Unused(),
&addr2: InUse(2),
},
args: args{excluding: &addr0},
want: &addr1,
},
{
name: "happy test with no excluding",
addrSet: AddrSet{
&addr0: InUse(0),
&addr1: Unused(),
&addr2: InUse(2),
},
args: args{excluding: nil},
want: &addr1,
},
{
name: "sad test with no excluding",
addrSet: AddrSet{
&addr0: InUse(0),
&addr1: InUse(1),
&addr2: InUse(2),
},
args: args{excluding: nil},
want: nil,
},
{
name: "sad test with excluding",
addrSet: AddrSet{
&addr0: Unused(),
&addr1: InUse(1),
&addr2: InUse(2),
},
args: args{excluding: &addr0},
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.addrSet.GetUnusedIP(tt.args.excluding); !reflect.DeepEqual(got, tt.want) {
t.Errorf("Region.GetUnusedIP() = %v, want %v", got, tt.want)
}
})
}
}
func TestAddrSet_GiveBack(t *testing.T) {
type args struct {
addr *EdgeAddr
}
tests := []struct {
name string
addrSet AddrSet
args args
wantOk bool
availableAfter int
}{
{
name: "sad test with excluding",
addrSet: AddrSet{
&addr1: InUse(1),
},
args: args{addr: &addr1},
wantOk: true,
availableAfter: 1,
},
{
name: "sad test with excluding",
addrSet: AddrSet{
&addr1: InUse(1),
},
args: args{addr: &addr2},
wantOk: false,
availableAfter: 0,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if gotOk := tt.addrSet.GiveBack(tt.args.addr); gotOk != tt.wantOk {
t.Errorf("Region.GiveBack() = %v, want %v", gotOk, tt.wantOk)
}
if tt.availableAfter != tt.addrSet.AvailableAddrs() {
t.Errorf("Region.AvailableAddrs() = %v, want %v", tt.addrSet.AvailableAddrs(), tt.availableAfter)
}
})
}
}
func TestAddrSet_GetAnyAddress(t *testing.T) {
tests := []struct {
name string
addrSet AddrSet
wantNil bool
}{
{
name: "Sad test -- GetAnyAddress should only fail if the region is empty",
addrSet: AddrSet{},
wantNil: true,
},
{
name: "Happy test (all addresses unused)",
addrSet: AddrSet{
&addr0: Unused(),
},
wantNil: false,
},
{
name: "Happy test (GetAnyAddress can still return addresses used by proxy conns)",
addrSet: AddrSet{
&addr0: InUse(2),
},
wantNil: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := tt.addrSet.GetAnyAddress(); tt.wantNil != (got == nil) {
t.Errorf("Region.GetAnyAddress() = %v, but should it return nil? %v", got, tt.wantNil)
}
})
}
}

View File

@ -13,7 +13,7 @@ import (
const (
// Used to discover HA origintunneld servers
srvService = "origintunneld"
srvService = "v2-origintunneld"
srvProto = "tcp"
srvName = "argotunnel.com"
@ -32,10 +32,40 @@ var (
netLookupIP = net.LookupIP
)
// ConfigIPVersion is the selection of IP versions from config
type ConfigIPVersion int8
const (
Auto ConfigIPVersion = 2
IPv4Only ConfigIPVersion = 4
IPv6Only ConfigIPVersion = 6
)
// IPVersion is the IP version of an EdgeAddr
type EdgeIPVersion int8
const (
V4 EdgeIPVersion = 4
V6 EdgeIPVersion = 6
)
// String returns the IP version as a string.
func (c EdgeIPVersion) String() string {
switch c {
case V4:
return "4"
case V6:
return "6"
default:
return ""
}
}
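A hedged sketch of how a configuration string might be mapped onto ConfigIPVersion; the parse helper and the accepted values are assumptions for illustration, not part of this change:
func parseConfigIPVersion(s string) (ConfigIPVersion, error) {
	switch s {
	case "auto":
		return Auto, nil
	case "4":
		return IPv4Only, nil
	case "6":
		return IPv6Only, nil
	default:
		return Auto, fmt.Errorf("unknown edge IP version %q", s)
	}
}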
// EdgeAddr is a representation of possible ways to refer an edge location.
type EdgeAddr struct {
TCP *net.TCPAddr
UDP *net.UDPAddr
TCP *net.TCPAddr
UDP *net.UDPAddr
IPVersion EdgeIPVersion
}
// If the call to net.LookupSRV fails, try to fall back to DoT from Cloudflare directly.
@ -85,6 +115,9 @@ func edgeDiscovery(log *zerolog.Logger, srvService string) ([][]*EdgeAddr, error
if err != nil {
return nil, err
}
for _, e := range edgeAddrs {
log.Debug().Msgf("Edge Address: %+v", *e)
}
resolvedAddrPerCNAME = append(resolvedAddrPerCNAME, edgeAddrs)
}
@ -120,9 +153,14 @@ func resolveSRV(srv *net.SRV) ([]*EdgeAddr, error) {
}
addrs := make([]*EdgeAddr, len(ips))
for i, ip := range ips {
version := V6
if ip.To4() != nil {
version = V4
}
addrs[i] = &EdgeAddr{
TCP: &net.TCPAddr{IP: ip, Port: int(srv.Port)},
UDP: &net.UDPAddr{IP: ip, Port: int(srv.Port)},
TCP: &net.TCPAddr{IP: ip, Port: int(srv.Port)},
UDP: &net.UDPAddr{IP: ip, Port: int(srv.Port)},
IPVersion: version,
}
}
return addrs, nil
@ -143,11 +181,15 @@ func ResolveAddrs(addrs []string, log *zerolog.Logger) (resolved []*EdgeAddr) {
log.Error().Str(logFieldAddress, addr).Err(err).Msg("failed to resolve to UDP address")
continue
}
version := V6
if udpAddr.IP.To4() != nil {
version = V4
}
resolved = append(resolved, &EdgeAddr{
TCP: tcpAddr,
UDP: udpAddr,
TCP: tcpAddr,
UDP: udpAddr,
IPVersion: version,
})
}
return
}

View File

@ -9,6 +9,115 @@ import (
"testing/quick"
)
var (
v4Addrs = []*EdgeAddr{&addr0, &addr1, &addr2, &addr3}
v6Addrs = []*EdgeAddr{&addr4, &addr5, &addr6, &addr7}
addr0 = EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("123.4.5.0"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("123.4.5.0"),
Port: 8000,
Zone: "",
},
IPVersion: V4,
}
addr1 = EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("123.4.5.1"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("123.4.5.1"),
Port: 8000,
Zone: "",
},
IPVersion: V4,
}
addr2 = EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("123.4.5.2"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("123.4.5.2"),
Port: 8000,
Zone: "",
},
IPVersion: V4,
}
addr3 = EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("123.4.5.3"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("123.4.5.3"),
Port: 8000,
Zone: "",
},
IPVersion: V4,
}
addr4 = EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("2606:4700:a0::1"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("2606:4700:a0::1"),
Port: 8000,
Zone: "",
},
IPVersion: V6,
}
addr5 = EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("2606:4700:a0::2"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("2606:4700:a0::2"),
Port: 8000,
Zone: "",
},
IPVersion: V6,
}
addr6 = EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("2606:4700:a0::3"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("2606:4700:a0::3"),
Port: 8000,
Zone: "",
},
IPVersion: V6,
}
addr7 = EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("2606:4700:a0::4"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("2606:4700:a0::4"),
Port: 8000,
Zone: "",
},
IPVersion: V6,
}
)
type mockAddrs struct {
// a set of synthetic SRV records
addrMap map[net.SRV][]*EdgeAddr
@ -36,7 +145,7 @@ func newMockAddrs(port uint16, numRegions uint8, numAddrsPerRegion uint8) mockAd
IP: net.ParseIP(fmt.Sprintf("10.0.%v.%v", r, a)),
Port: int(port),
}
addrs = append(addrs, &EdgeAddr{tcpAddr, udpAddr})
addrs = append(addrs, &EdgeAddr{tcpAddr, udpAddr, V4})
}
addrMap[srv] = addrs
numAddrs += len(addrs)

View File

@ -1,79 +1,155 @@
package allregions
import "time"
const (
timeoutDuration = 10 * time.Minute
)
// Region contains cloudflared edge addresses. The edge is partitioned into several regions for
// redundancy purposes.
type Region struct {
connFor map[*EdgeAddr]UsedBy
primaryIsActive bool
active AddrSet
primary AddrSet
secondary AddrSet
primaryTimeout time.Time
timeoutDuration time.Duration
}
// NewRegion creates a region with the given addresses, which are all unused.
func NewRegion(addrs []*EdgeAddr) Region {
func NewRegion(addrs []*EdgeAddr, overrideIPVersion ConfigIPVersion) Region {
// The zero value of UsedBy is Unused(), so we can just initialize the map's values with their
// zero values.
connFor := make(map[*EdgeAddr]UsedBy)
for _, addr := range addrs {
connFor[addr] = Unused()
connForv4 := make(AddrSet)
connForv6 := make(AddrSet)
systemPreference := V6
for i, addr := range addrs {
if i == 0 {
// The address family of the first IP returned reflects the system's IP preference
systemPreference = addr.IPVersion
}
switch addr.IPVersion {
case V4:
connForv4[addr] = Unused()
case V6:
connForv6[addr] = Unused()
}
}
// Process as system preference
var primary AddrSet
var secondary AddrSet
switch systemPreference {
case V4:
primary = connForv4
secondary = connForv6
case V6:
primary = connForv6
secondary = connForv4
}
// Override with provided preference
switch overrideIPVersion {
case IPv4Only:
primary = connForv4
secondary = make(AddrSet) // empty
case IPv6Only:
primary = connForv6
secondary = make(AddrSet) // empty
case Auto:
// no change
default:
// no change
}
return Region{
connFor: connFor,
primaryIsActive: true,
active: primary,
primary: primary,
secondary: secondary,
timeoutDuration: timeoutDuration,
}
}
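As a concrete illustration of the selection above (a sketch reusing the v4Addrs/v6Addrs test fixtures defined elsewhere in this change):
// With Auto, the family of the first address decides the primary set.
r := NewRegion(append(v6Addrs, v4Addrs...), Auto) // primary = IPv6 set, secondary = IPv4 set
// An explicit override discards the other family entirely.
r4 := NewRegion(append(v6Addrs, v4Addrs...), IPv4Only) // primary = IPv4 set, secondary empty
_, _ = r, r4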
// AddrUsedBy finds the address used by the given connection in this region.
// Returns nil if the connection isn't using any IP.
func (r *Region) AddrUsedBy(connID int) *EdgeAddr {
for addr, used := range r.connFor {
if used.Used && used.ConnID == connID {
return addr
}
edgeAddr := r.primary.AddrUsedBy(connID)
if edgeAddr == nil {
edgeAddr = r.secondary.AddrUsedBy(connID)
}
return nil
return edgeAddr
}
// AvailableAddrs counts how many unused addresses this region contains.
func (r Region) AvailableAddrs() int {
n := 0
for _, usedby := range r.connFor {
if !usedby.Used {
n++
}
}
return n
return r.active.AvailableAddrs()
}
// GetUnusedIP returns a random unused address in this region.
// Returns nil if all addresses are in use.
func (r Region) GetUnusedIP(excluding *EdgeAddr) *EdgeAddr {
for addr, usedby := range r.connFor {
if !usedby.Used && addr != excluding {
return addr
}
}
return nil
}
// Use the address, assigning it to a proxy connection.
func (r Region) Use(addr *EdgeAddr, connID int) {
if addr == nil {
return
}
r.connFor[addr] = InUse(connID)
}
// GetAnyAddress returns an arbitrary address from the region.
func (r Region) GetAnyAddress() *EdgeAddr {
for addr := range r.connFor {
// AssignAnyAddress assigns a random unused address in this region to the given connID, excluding the provided EdgeAddr.
// Returns nil if all addresses are in use for the region.
func (r Region) AssignAnyAddress(connID int, excluding *EdgeAddr) *EdgeAddr {
if addr := r.active.GetUnusedIP(excluding); addr != nil {
r.active.Use(addr, connID)
return addr
}
return nil
}
// GetAnyAddress returns an arbitrary address from the region.
func (r Region) GetAnyAddress() *EdgeAddr {
return r.active.GetAnyAddress()
}
// GiveBack the address, ensuring it is no longer assigned to a proxy connection.
// Returns true if the address is in this region.
func (r Region) GiveBack(addr *EdgeAddr) (ok bool) {
if _, ok := r.connFor[addr]; !ok {
return false
func (r *Region) GiveBack(addr *EdgeAddr, hasConnectivityError bool) (ok bool) {
if ok = r.primary.GiveBack(addr); !ok {
// Attempt to give back the address in the secondary set
if ok = r.secondary.GiveBack(addr); !ok {
// Address is not in this region
return
}
}
r.connFor[addr] = Unused()
return true
// No connectivity error: no worry
if !hasConnectivityError {
return
}
// If using primary and returned address is IPv6 and secondary is available
if r.primaryIsActive && addr.IPVersion == V6 && len(r.secondary) > 0 {
r.active = r.secondary
r.primaryIsActive = false
r.primaryTimeout = time.Now().Add(r.timeoutDuration)
return
}
// Do nothing for IPv4 or if secondary is empty
if r.primaryIsActive {
return
}
// Immediately return to primary pool, regardless of current primary timeout
if addr.IPVersion == V4 {
activatePrimary(r)
return
}
// Timeout exceeded and can be reset to primary pool
if r.primaryTimeout.Before(time.Now()) {
activatePrimary(r)
return
}
return
}
// activatePrimary makes the primary set the active set and resets the timeout.
func activatePrimary(r *Region) {
r.active = r.primary
r.primaryIsActive = true
r.primaryTimeout = time.Now() // reset timeout
}
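The fallback rules implemented by GiveBack and activatePrimary can be summarized with a short sketch (illustrative; the same behavior is exercised by the tests that follow):
r := NewRegion(append(v6Addrs, v4Addrs...), Auto) // IPv6 is the primary set
addr := r.AssignAnyAddress(0, nil)                // IPv6 address
r.GiveBack(addr, true) // IPv6 connectivity error: switch to the IPv4 set and arm the timeout
v4 := r.AssignAnyAddress(1, nil)                  // now served from the IPv4 set
r.GiveBack(v4, true)   // IPv4 connectivity error: reactivate the IPv6 primary immediately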

View File

@ -1,284 +1,357 @@
package allregions
import (
"reflect"
"net"
"testing"
"time"
"github.com/stretchr/testify/assert"
)
func makeAddrSet(addrs []*EdgeAddr) AddrSet {
addrSet := make(AddrSet, len(addrs))
for _, addr := range addrs {
addrSet[addr] = Unused()
}
return addrSet
}
func TestRegion_New(t *testing.T) {
r := NewRegion([]*EdgeAddr{&addr0, &addr1, &addr2})
if r.AvailableAddrs() != 3 {
t.Errorf("r.AvailableAddrs() == %v but want 3", r.AvailableAddrs())
}
}
func TestRegion_AddrUsedBy(t *testing.T) {
type fields struct {
connFor map[*EdgeAddr]UsedBy
}
type args struct {
connID int
}
tests := []struct {
name string
fields fields
args args
want *EdgeAddr
name string
addrs []*EdgeAddr
mode ConfigIPVersion
expectedAddrs int
primary AddrSet
secondary AddrSet
}{
{
name: "happy trivial test",
fields: fields{connFor: map[*EdgeAddr]UsedBy{
&addr0: InUse(0),
}},
args: args{connID: 0},
want: &addr0,
name: "IPv4 addresses with IPv4Only",
addrs: v4Addrs,
mode: IPv4Only,
expectedAddrs: len(v4Addrs),
primary: makeAddrSet(v4Addrs),
secondary: AddrSet{},
},
{
name: "sad trivial test",
fields: fields{connFor: map[*EdgeAddr]UsedBy{
&addr0: InUse(0),
}},
args: args{connID: 1},
want: nil,
name: "IPv6 addresses with IPv4Only",
addrs: v6Addrs,
mode: IPv4Only,
expectedAddrs: 0,
primary: AddrSet{},
secondary: AddrSet{},
},
{
name: "sad test",
fields: fields{connFor: map[*EdgeAddr]UsedBy{
&addr0: InUse(0),
&addr1: InUse(1),
&addr2: InUse(2),
}},
args: args{connID: 3},
want: nil,
name: "IPv6 addresses with IPv6Only",
addrs: v6Addrs,
mode: IPv6Only,
expectedAddrs: len(v6Addrs),
primary: makeAddrSet(v6Addrs),
secondary: AddrSet{},
},
{
name: "happy test",
fields: fields{connFor: map[*EdgeAddr]UsedBy{
&addr0: InUse(0),
&addr1: InUse(1),
&addr2: InUse(2),
}},
args: args{connID: 1},
want: &addr1,
name: "IPv6 addresses with IPv4Only",
addrs: v6Addrs,
mode: IPv4Only,
expectedAddrs: 0,
primary: AddrSet{},
secondary: AddrSet{},
},
{
name: "IPv4 (first) and IPv6 addresses with Auto",
addrs: append(v4Addrs, v6Addrs...),
mode: Auto,
expectedAddrs: len(v4Addrs),
primary: makeAddrSet(v4Addrs),
secondary: makeAddrSet(v6Addrs),
},
{
name: "IPv6 (first) and IPv4 addresses with Auto",
addrs: append(v6Addrs, v4Addrs...),
mode: Auto,
expectedAddrs: len(v6Addrs),
primary: makeAddrSet(v6Addrs),
secondary: makeAddrSet(v4Addrs),
},
{
name: "IPv4 addresses with Auto",
addrs: v4Addrs,
mode: Auto,
expectedAddrs: len(v4Addrs),
primary: makeAddrSet(v4Addrs),
secondary: AddrSet{},
},
{
name: "IPv6 addresses with Auto",
addrs: v6Addrs,
mode: Auto,
expectedAddrs: len(v6Addrs),
primary: makeAddrSet(v6Addrs),
secondary: AddrSet{},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := &Region{
connFor: tt.fields.connFor,
r := NewRegion(tt.addrs, tt.mode)
assert.Equal(t, tt.expectedAddrs, r.AvailableAddrs())
assert.Equal(t, tt.primary, r.primary)
assert.Equal(t, tt.secondary, r.secondary)
})
}
}
func TestRegion_AnyAddress_EmptyActiveSet(t *testing.T) {
tests := []struct {
name string
addrs []*EdgeAddr
mode ConfigIPVersion
}{
{
name: "IPv6 addresses with IPv4Only",
addrs: v6Addrs,
mode: IPv4Only,
},
{
name: "IPv4 addresses with IPv6Only",
addrs: v4Addrs,
mode: IPv6Only,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := NewRegion(tt.addrs, tt.mode)
addr := r.GetAnyAddress()
assert.Nil(t, addr)
addr = r.AssignAnyAddress(0, nil)
assert.Nil(t, addr)
})
}
}
func TestRegion_AssignAnyAddress_FullyUsedActiveSet(t *testing.T) {
tests := []struct {
name string
addrs []*EdgeAddr
mode ConfigIPVersion
}{
{
name: "IPv6 addresses with IPv6Only",
addrs: v6Addrs,
mode: IPv6Only,
},
{
name: "IPv4 addresses with IPv4Only",
addrs: v4Addrs,
mode: IPv4Only,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := NewRegion(tt.addrs, tt.mode)
total := r.active.AvailableAddrs()
for i := 0; i < total; i++ {
addr := r.AssignAnyAddress(i, nil)
assert.NotNil(t, addr)
}
if got := r.AddrUsedBy(tt.args.connID); !reflect.DeepEqual(got, tt.want) {
t.Errorf("Region.AddrUsedBy() = %v, want %v", got, tt.want)
addr := r.AssignAnyAddress(9, nil)
assert.Nil(t, addr)
})
}
}
var giveBackTests = []struct {
name string
addrs []*EdgeAddr
mode ConfigIPVersion
expectedAddrs int
primary AddrSet
secondary AddrSet
primarySwap bool
}{
{
name: "IPv4 addresses with IPv4Only",
addrs: v4Addrs,
mode: IPv4Only,
expectedAddrs: len(v4Addrs),
primary: makeAddrSet(v4Addrs),
secondary: AddrSet{},
primarySwap: false,
},
{
name: "IPv6 addresses with IPv6Only",
addrs: v6Addrs,
mode: IPv6Only,
expectedAddrs: len(v6Addrs),
primary: makeAddrSet(v6Addrs),
secondary: AddrSet{},
primarySwap: false,
},
{
name: "IPv4 (first) and IPv6 addresses with Auto",
addrs: append(v4Addrs, v6Addrs...),
mode: Auto,
expectedAddrs: len(v4Addrs),
primary: makeAddrSet(v4Addrs),
secondary: makeAddrSet(v6Addrs),
primarySwap: false,
},
{
name: "IPv6 (first) and IPv4 addresses with Auto",
addrs: append(v6Addrs, v4Addrs...),
mode: Auto,
expectedAddrs: len(v6Addrs),
primary: makeAddrSet(v6Addrs),
secondary: makeAddrSet(v4Addrs),
primarySwap: true,
},
{
name: "IPv4 addresses with Auto",
addrs: v4Addrs,
mode: Auto,
expectedAddrs: len(v4Addrs),
primary: makeAddrSet(v4Addrs),
secondary: AddrSet{},
primarySwap: false,
},
{
name: "IPv6 addresses with Auto",
addrs: v6Addrs,
mode: Auto,
expectedAddrs: len(v6Addrs),
primary: makeAddrSet(v6Addrs),
secondary: AddrSet{},
primarySwap: false,
},
}
func TestRegion_GiveBack_NoConnectivityError(t *testing.T) {
for _, tt := range giveBackTests {
t.Run(tt.name, func(t *testing.T) {
r := NewRegion(tt.addrs, tt.mode)
addr := r.AssignAnyAddress(0, nil)
assert.NotNil(t, addr)
assert.True(t, r.GiveBack(addr, false))
})
}
}
func TestRegion_GiveBack_ForeignAddr(t *testing.T) {
invalid := EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("123.4.5.0"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("123.4.5.0"),
Port: 8000,
Zone: "",
},
IPVersion: V4,
}
for _, tt := range giveBackTests {
t.Run(tt.name, func(t *testing.T) {
r := NewRegion(tt.addrs, tt.mode)
assert.False(t, r.GiveBack(&invalid, false))
assert.False(t, r.GiveBack(&invalid, true))
})
}
}
func TestRegion_GiveBack_SwapPrimary(t *testing.T) {
for _, tt := range giveBackTests {
t.Run(tt.name, func(t *testing.T) {
r := NewRegion(tt.addrs, tt.mode)
addr := r.AssignAnyAddress(0, nil)
assert.NotNil(t, addr)
assert.True(t, r.GiveBack(addr, true))
assert.Equal(t, tt.primarySwap, !r.primaryIsActive)
if tt.primarySwap {
assert.Equal(t, r.secondary, r.active)
assert.False(t, r.primaryTimeout.IsZero())
} else {
assert.Equal(t, r.primary, r.active)
assert.True(t, r.primaryTimeout.IsZero())
}
})
}
}
func TestRegion_AvailableAddrs(t *testing.T) {
type fields struct {
connFor map[*EdgeAddr]UsedBy
}
tests := []struct {
name string
fields fields
want int
}{
{
name: "contains addresses",
fields: fields{connFor: map[*EdgeAddr]UsedBy{
&addr0: InUse(0),
&addr1: Unused(),
&addr2: InUse(2),
}},
want: 1,
},
{
name: "all free",
fields: fields{connFor: map[*EdgeAddr]UsedBy{
&addr0: Unused(),
&addr1: Unused(),
&addr2: Unused(),
}},
want: 3,
},
{
name: "all used",
fields: fields{connFor: map[*EdgeAddr]UsedBy{
&addr0: InUse(0),
&addr1: InUse(1),
&addr2: InUse(2),
}},
want: 0,
},
{
name: "empty",
fields: fields{connFor: map[*EdgeAddr]UsedBy{}},
want: 0,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := Region{
connFor: tt.fields.connFor,
}
if got := r.AvailableAddrs(); got != tt.want {
t.Errorf("Region.AvailableAddrs() = %v, want %v", got, tt.want)
}
})
}
func TestRegion_GiveBack_IPv4_ResetPrimary(t *testing.T) {
r := NewRegion(append(v6Addrs, v4Addrs...), Auto)
// Exhaust all IPv6 addresses
a0 := r.AssignAnyAddress(0, nil)
a1 := r.AssignAnyAddress(1, nil)
a2 := r.AssignAnyAddress(2, nil)
a3 := r.AssignAnyAddress(3, nil)
assert.NotNil(t, a0)
assert.NotNil(t, a1)
assert.NotNil(t, a2)
assert.NotNil(t, a3)
// Give back the first IPv6 address to fallback to secondary IPv4 address set
assert.True(t, r.GiveBack(a0, true))
assert.False(t, r.primaryIsActive)
// Give back another IPv6 address
assert.True(t, r.GiveBack(a1, true))
// Primary shouldn't change
assert.False(t, r.primaryIsActive)
// Request an address (should be IPv4 from secondary)
a4_v4 := r.AssignAnyAddress(4, nil)
assert.NotNil(t, a4_v4)
assert.Equal(t, V4, a4_v4.IPVersion)
a5_v4 := r.AssignAnyAddress(5, nil)
assert.NotNil(t, a5_v4)
assert.Equal(t, V4, a5_v4.IPVersion)
a6_v4 := r.AssignAnyAddress(6, nil)
assert.NotNil(t, a6_v4)
assert.Equal(t, V4, a6_v4.IPVersion)
// Return IPv4 address (without failure)
// Primary shouldn't change because it is not a connectivity failure
assert.True(t, r.GiveBack(a4_v4, false))
assert.False(t, r.primaryIsActive)
// Return IPv4 address (with failure)
// Primary should change because it is a connectivity failure
assert.True(t, r.GiveBack(a5_v4, true))
assert.True(t, r.primaryIsActive)
// Return IPv4 address (with failure)
// Primary shouldn't change because the address is returned to the inactive
// secondary address set
assert.True(t, r.GiveBack(a6_v4, true))
assert.True(t, r.primaryIsActive)
// Return IPv6 address (without failure)
// Primary shouldn't change because it is not a connectivity failure
assert.True(t, r.GiveBack(a2, false))
assert.True(t, r.primaryIsActive)
}
func TestRegion_GetUnusedIP(t *testing.T) {
type fields struct {
connFor map[*EdgeAddr]UsedBy
}
type args struct {
excluding *EdgeAddr
}
tests := []struct {
name string
fields fields
args args
want *EdgeAddr
}{
{
name: "happy test with excluding set",
fields: fields{connFor: map[*EdgeAddr]UsedBy{
&addr0: Unused(),
&addr1: Unused(),
&addr2: InUse(2),
}},
args: args{excluding: &addr0},
want: &addr1,
},
{
name: "happy test with no excluding",
fields: fields{connFor: map[*EdgeAddr]UsedBy{
&addr0: InUse(0),
&addr1: Unused(),
&addr2: InUse(2),
}},
args: args{excluding: nil},
want: &addr1,
},
{
name: "sad test with no excluding",
fields: fields{connFor: map[*EdgeAddr]UsedBy{
&addr0: InUse(0),
&addr1: InUse(1),
&addr2: InUse(2),
}},
args: args{excluding: nil},
want: nil,
},
{
name: "sad test with excluding",
fields: fields{connFor: map[*EdgeAddr]UsedBy{
&addr0: Unused(),
&addr1: InUse(1),
&addr2: InUse(2),
}},
args: args{excluding: &addr0},
want: nil,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := Region{
connFor: tt.fields.connFor,
}
if got := r.GetUnusedIP(tt.args.excluding); !reflect.DeepEqual(got, tt.want) {
t.Errorf("Region.GetUnusedIP() = %v, want %v", got, tt.want)
}
})
}
}
func TestRegion_GiveBack(t *testing.T) {
type fields struct {
connFor map[*EdgeAddr]UsedBy
}
type args struct {
addr *EdgeAddr
}
tests := []struct {
name string
fields fields
args args
wantOk bool
availableAfter int
}{
{
name: "sad test with excluding",
fields: fields{connFor: map[*EdgeAddr]UsedBy{
&addr1: InUse(1),
}},
args: args{addr: &addr1},
wantOk: true,
availableAfter: 1,
},
{
name: "sad test with excluding",
fields: fields{connFor: map[*EdgeAddr]UsedBy{
&addr1: InUse(1),
}},
args: args{addr: &addr2},
wantOk: false,
availableAfter: 0,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := Region{
connFor: tt.fields.connFor,
}
if gotOk := r.GiveBack(tt.args.addr); gotOk != tt.wantOk {
t.Errorf("Region.GiveBack() = %v, want %v", gotOk, tt.wantOk)
}
if tt.availableAfter != r.AvailableAddrs() {
t.Errorf("Region.AvailableAddrs() = %v, want %v", r.AvailableAddrs(), tt.availableAfter)
}
})
}
}
func TestRegion_GetAnyAddress(t *testing.T) {
type fields struct {
connFor map[*EdgeAddr]UsedBy
}
tests := []struct {
name string
fields fields
wantNil bool
}{
{
name: "Sad test -- GetAnyAddress should only fail if the region is empty",
fields: fields{connFor: map[*EdgeAddr]UsedBy{}},
wantNil: true,
},
{
name: "Happy test (all addresses unused)",
fields: fields{connFor: map[*EdgeAddr]UsedBy{
&addr0: Unused(),
}},
wantNil: false,
},
{
name: "Happy test (GetAnyAddress can still return addresses used by proxy conns)",
fields: fields{connFor: map[*EdgeAddr]UsedBy{
&addr0: InUse(2),
}},
wantNil: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
r := Region{
connFor: tt.fields.connFor,
}
if got := r.GetAnyAddress(); tt.wantNil != (got == nil) {
t.Errorf("Region.GetAnyAddress() = %v, but should it return nil? %v", got, tt.wantNil)
}
})
}
func TestRegion_GiveBack_Timeout(t *testing.T) {
r := NewRegion(append(v6Addrs, v4Addrs...), Auto)
a0 := r.AssignAnyAddress(0, nil)
a1 := r.AssignAnyAddress(1, nil)
a2 := r.AssignAnyAddress(2, nil)
assert.NotNil(t, a0)
assert.NotNil(t, a1)
assert.NotNil(t, a2)
// Give back IPv6 address to set timeout
assert.True(t, r.GiveBack(a0, true))
assert.False(t, r.primaryIsActive)
assert.False(t, r.primaryTimeout.IsZero())
// Request an address (should be IPv4 from secondary)
a3_v4 := r.AssignAnyAddress(3, nil)
assert.NotNil(t, a3_v4)
assert.Equal(t, V4, a3_v4.IPVersion)
assert.False(t, r.primaryIsActive)
// Give back IPv6 address inside timeout (no change)
assert.True(t, r.GiveBack(a2, true))
assert.False(t, r.primaryIsActive)
assert.False(t, r.primaryTimeout.IsZero())
// Accelerate timeout
r.primaryTimeout = time.Now().Add(-time.Minute)
// Return IPv6 address
assert.True(t, r.GiveBack(a1, true))
assert.True(t, r.primaryIsActive)
// Returning an IPv4 address after primary is active shouldn't change primary
// even with a connectivity error
assert.True(t, r.GiveBack(a3_v4, true))
assert.True(t, r.primaryIsActive)
}

View File

@ -18,7 +18,7 @@ type Regions struct {
// ------------------------------------
// ResolveEdge resolves the Cloudflare edge, returning all regions discovered.
func ResolveEdge(log *zerolog.Logger, region string) (*Regions, error) {
func ResolveEdge(log *zerolog.Logger, region string, overrideIPVersion ConfigIPVersion) (*Regions, error) {
edgeAddrs, err := edgeDiscovery(log, getRegionalServiceName(region))
if err != nil {
return nil, err
@ -27,8 +27,8 @@ func ResolveEdge(log *zerolog.Logger, region string) (*Regions, error) {
return nil, fmt.Errorf("expected at least 2 Cloudflare Regions regions, but SRV only returned %v", len(edgeAddrs))
}
return &Regions{
region1: NewRegion(edgeAddrs[0]),
region2: NewRegion(edgeAddrs[1]),
region1: NewRegion(edgeAddrs[0], overrideIPVersion),
region2: NewRegion(edgeAddrs[1], overrideIPVersion),
}, nil
}
@ -56,8 +56,8 @@ func NewNoResolve(addrs []*EdgeAddr) *Regions {
}
return &Regions{
region1: NewRegion(region1),
region2: NewRegion(region2),
region1: NewRegion(region1, Auto),
region2: NewRegion(region2, Auto),
}
}
@ -95,14 +95,12 @@ func (rs *Regions) GetUnusedAddr(excluding *EdgeAddr, connID int) *EdgeAddr {
// getAddrs tries to grab address form `first` region, then `second` region
// this is an unrolled loop over 2 element array
func getAddrs(excluding *EdgeAddr, connID int, first *Region, second *Region) *EdgeAddr {
addr := first.GetUnusedIP(excluding)
addr := first.AssignAnyAddress(connID, excluding)
if addr != nil {
first.Use(addr, connID)
return addr
}
addr = second.GetUnusedIP(excluding)
addr = second.AssignAnyAddress(connID, excluding)
if addr != nil {
second.Use(addr, connID)
return addr
}
@ -116,18 +114,18 @@ func (rs *Regions) AvailableAddrs() int {
// GiveBack the address so that other connections can use it.
// Returns true if the address is in this edge.
func (rs *Regions) GiveBack(addr *EdgeAddr) bool {
if found := rs.region1.GiveBack(addr); found {
func (rs *Regions) GiveBack(addr *EdgeAddr, hasConnectivityError bool) bool {
if found := rs.region1.GiveBack(addr, hasConnectivityError); found {
return found
}
return rs.region2.GiveBack(addr)
return rs.region2.GiveBack(addr, hasConnectivityError)
}
// Return regionalized service name if `region` isn't empty, otherwise return the global service name for origintunneld
func getRegionalServiceName(region string) string {
if region != "" {
return region + "-" + srvService // Example: `us-origintunneld`
return region + "-" + srvService // Example: `us-v2-origintunneld`
}
return srvService // Global service is just `origintunneld`
return srvService // Global service is just `v2-origintunneld`
}

View File

@ -1,134 +1,215 @@
package allregions
import (
"net"
"testing"
"github.com/stretchr/testify/assert"
)
var (
addr0 = EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("123.4.5.0"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("123.4.5.0"),
Port: 8000,
Zone: "",
},
func makeRegions(addrs []*EdgeAddr, mode ConfigIPVersion) Regions {
r1addrs := make([]*EdgeAddr, 0)
r2addrs := make([]*EdgeAddr, 0)
for i, addr := range addrs {
if i%2 == 0 {
r1addrs = append(r1addrs, addr)
} else {
r2addrs = append(r2addrs, addr)
}
}
addr1 = EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("123.4.5.1"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("123.4.5.1"),
Port: 8000,
Zone: "",
},
}
addr2 = EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("123.4.5.2"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("123.4.5.2"),
Port: 8000,
Zone: "",
},
}
addr3 = EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("123.4.5.3"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("123.4.5.3"),
Port: 8000,
Zone: "",
},
}
)
func makeRegions() Regions {
r1 := NewRegion([]*EdgeAddr{&addr0, &addr1})
r2 := NewRegion([]*EdgeAddr{&addr2, &addr3})
r1 := NewRegion(r1addrs, mode)
r2 := NewRegion(r2addrs, mode)
return Regions{region1: r1, region2: r2}
}
func TestRegions_AddrUsedBy(t *testing.T) {
rs := makeRegions()
addr1 := rs.GetUnusedAddr(nil, 1)
assert.Equal(t, addr1, rs.AddrUsedBy(1))
addr2 := rs.GetUnusedAddr(nil, 2)
assert.Equal(t, addr2, rs.AddrUsedBy(2))
addr3 := rs.GetUnusedAddr(nil, 3)
assert.Equal(t, addr3, rs.AddrUsedBy(3))
tests := []struct {
name string
addrs []*EdgeAddr
mode ConfigIPVersion
}{
{
name: "IPv4 addresses with IPv4Only",
addrs: v4Addrs,
mode: IPv4Only,
},
{
name: "IPv6 addresses with IPv6Only",
addrs: v6Addrs,
mode: IPv6Only,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
rs := makeRegions(tt.addrs, tt.mode)
addr1 := rs.GetUnusedAddr(nil, 1)
assert.Equal(t, addr1, rs.AddrUsedBy(1))
addr2 := rs.GetUnusedAddr(nil, 2)
assert.Equal(t, addr2, rs.AddrUsedBy(2))
addr3 := rs.GetUnusedAddr(nil, 3)
assert.Equal(t, addr3, rs.AddrUsedBy(3))
})
}
}
func TestRegions_Giveback_Region1(t *testing.T) {
rs := makeRegions()
rs.region1.Use(&addr0, 0)
rs.region1.Use(&addr1, 1)
rs.region2.Use(&addr2, 2)
rs.region2.Use(&addr3, 3)
tests := []struct {
name string
addrs []*EdgeAddr
mode ConfigIPVersion
}{
{
name: "IPv4 addresses with IPv4Only",
addrs: v4Addrs,
mode: IPv4Only,
},
{
name: "IPv6 addresses with IPv6Only",
addrs: v6Addrs,
mode: IPv6Only,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
rs := makeRegions(tt.addrs, tt.mode)
addr := rs.region1.AssignAnyAddress(0, nil)
rs.region1.AssignAnyAddress(1, nil)
rs.region2.AssignAnyAddress(2, nil)
rs.region2.AssignAnyAddress(3, nil)
assert.Equal(t, 0, rs.AvailableAddrs())
assert.Equal(t, 0, rs.AvailableAddrs())
rs.GiveBack(&addr0)
assert.Equal(t, &addr0, rs.GetUnusedAddr(nil, 3))
rs.GiveBack(addr, false)
assert.Equal(t, addr, rs.GetUnusedAddr(nil, 0))
})
}
}
func TestRegions_Giveback_Region2(t *testing.T) {
rs := makeRegions()
rs.region1.Use(&addr0, 0)
rs.region1.Use(&addr1, 1)
rs.region2.Use(&addr2, 2)
rs.region2.Use(&addr3, 3)
tests := []struct {
name string
addrs []*EdgeAddr
mode ConfigIPVersion
}{
{
name: "IPv4 addresses with IPv4Only",
addrs: v4Addrs,
mode: IPv4Only,
},
{
name: "IPv6 addresses with IPv6Only",
addrs: v6Addrs,
mode: IPv6Only,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
rs := makeRegions(tt.addrs, tt.mode)
rs.region1.AssignAnyAddress(0, nil)
rs.region1.AssignAnyAddress(1, nil)
addr := rs.region2.AssignAnyAddress(2, nil)
rs.region2.AssignAnyAddress(3, nil)
assert.Equal(t, 0, rs.AvailableAddrs())
assert.Equal(t, 0, rs.AvailableAddrs())
rs.GiveBack(&addr2)
assert.Equal(t, &addr2, rs.GetUnusedAddr(nil, 2))
rs.GiveBack(addr, false)
assert.Equal(t, addr, rs.GetUnusedAddr(nil, 2))
})
}
}
func TestRegions_GetUnusedAddr_OneAddrLeft(t *testing.T) {
rs := makeRegions()
tests := []struct {
name string
addrs []*EdgeAddr
mode ConfigIPVersion
}{
{
name: "IPv4 addresses with IPv4Only",
addrs: v4Addrs,
mode: IPv4Only,
},
{
name: "IPv6 addresses with IPv6Only",
addrs: v6Addrs,
mode: IPv6Only,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
rs := makeRegions(tt.addrs, tt.mode)
rs.region1.AssignAnyAddress(0, nil)
rs.region1.AssignAnyAddress(1, nil)
rs.region2.AssignAnyAddress(2, nil)
addr := rs.region2.active.GetUnusedIP(nil)
rs.region1.Use(&addr0, 0)
rs.region1.Use(&addr1, 1)
rs.region2.Use(&addr2, 2)
assert.Equal(t, 1, rs.AvailableAddrs())
assert.Equal(t, &addr3, rs.GetUnusedAddr(nil, 3))
assert.Equal(t, 1, rs.AvailableAddrs())
assert.Equal(t, addr, rs.GetUnusedAddr(nil, 3))
})
}
}
func TestRegions_GetUnusedAddr_Excluding_Region1(t *testing.T) {
rs := makeRegions()
tests := []struct {
name string
addrs []*EdgeAddr
mode ConfigIPVersion
}{
{
name: "IPv4 addresses with IPv4Only",
addrs: v4Addrs,
mode: IPv4Only,
},
{
name: "IPv6 addresses with IPv6Only",
addrs: v6Addrs,
mode: IPv6Only,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
rs := makeRegions(tt.addrs, tt.mode)
rs.region1.Use(&addr0, 0)
rs.region1.Use(&addr1, 1)
rs.region1.AssignAnyAddress(0, nil)
rs.region1.AssignAnyAddress(1, nil)
addr := rs.region2.active.GetUnusedIP(nil)
a2 := rs.region2.active.GetUnusedIP(addr)
assert.Equal(t, 2, rs.AvailableAddrs())
assert.Equal(t, &addr3, rs.GetUnusedAddr(&addr2, 3))
assert.Equal(t, 2, rs.AvailableAddrs())
assert.Equal(t, addr, rs.GetUnusedAddr(a2, 3))
})
}
}
func TestRegions_GetUnusedAddr_Excluding_Region2(t *testing.T) {
rs := makeRegions()
tests := []struct {
name string
addrs []*EdgeAddr
mode ConfigIPVersion
}{
{
name: "IPv4 addresses with IPv4Only",
addrs: v4Addrs,
mode: IPv4Only,
},
{
name: "IPv6 addresses with IPv6Only",
addrs: v6Addrs,
mode: IPv6Only,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
rs := makeRegions(tt.addrs, tt.mode)
rs.region2.Use(&addr2, 0)
rs.region2.Use(&addr3, 1)
rs.region2.AssignAnyAddress(0, nil)
rs.region2.AssignAnyAddress(1, nil)
addr := rs.region1.active.GetUnusedIP(nil)
a2 := rs.region1.active.GetUnusedIP(addr)
assert.Equal(t, 2, rs.AvailableAddrs())
assert.Equal(t, &addr1, rs.GetUnusedAddr(&addr0, 1))
assert.Equal(t, 2, rs.AvailableAddrs())
assert.Equal(t, addr, rs.GetUnusedAddr(a2, 1))
})
}
}
func TestNewNoResolveBalancesRegions(t *testing.T) {

View File

@ -1,7 +1,6 @@
package edgediscovery
import (
"fmt"
"sync"
"github.com/rs/zerolog"
@ -11,9 +10,16 @@ import (
const (
LogFieldConnIndex = "connIndex"
LogFieldIPAddress = "ip"
)
var errNoAddressesLeft = fmt.Errorf("there are no free edge addresses left")
var errNoAddressesLeft = ErrNoAddressesLeft{}
type ErrNoAddressesLeft struct{}
func (e ErrNoAddressesLeft) Error() string {
return "there are no free edge addresses left to resolve to"
}
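Exporting the error as a type lets callers match it structurally; a sketch of how a caller might check for it (assuming the standard errors package):
if errors.Is(err, edgediscovery.ErrNoAddressesLeft{}) {
	// All edge addresses are assigned; wait for an address to be given back
	// or for the edge list to be refreshed before retrying.
}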
// Edge finds addresses on the Cloudflare edge and hands them out to connections.
type Edge struct {
@ -28,8 +34,8 @@ type Edge struct {
// ResolveEdge runs the initial discovery of the Cloudflare edge, finding Addrs that can be allocated
// to connections.
func ResolveEdge(log *zerolog.Logger, region string) (*Edge, error) {
regions, err := allregions.ResolveEdge(log, region)
func ResolveEdge(log *zerolog.Logger, region string, edgeIpVersion allregions.ConfigIPVersion) (*Edge, error) {
regions, err := allregions.ResolveEdge(log, region, edgeIpVersion)
if err != nil {
return new(Edge), err
}
@ -51,15 +57,6 @@ func StaticEdge(log *zerolog.Logger, hostnames []string) (*Edge, error) {
}, nil
}
// MockEdge creates a Cloudflare Edge from arbitrary TCP addresses. Used for testing.
func MockEdge(log *zerolog.Logger, addrs []*allregions.EdgeAddr) *Edge {
regions := allregions.NewNoResolve(addrs)
return &Edge{
log: log,
regions: regions,
}
}
// ------------------------------------
// Methods
// ------------------------------------
@ -93,12 +90,15 @@ func (ed *Edge) GetAddr(connIndex int) (*allregions.EdgeAddr, error) {
log.Debug().Msg("edgediscovery - GetAddr: No addresses left to give proxy connection")
return nil, errNoAddressesLeft
}
log.Debug().Msg("edgediscovery - GetAddr: Giving connection its new address")
log = ed.log.With().
Int(LogFieldConnIndex, connIndex).
IPAddr(LogFieldIPAddress, addr.UDP.IP).Logger()
log.Debug().Msgf("edgediscovery - GetAddr: Giving connection its new address")
return addr, nil
}
// GetDifferentAddr gives back the proxy connection's edge Addr and uses a new one.
func (ed *Edge) GetDifferentAddr(connIndex int) (*allregions.EdgeAddr, error) {
func (ed *Edge) GetDifferentAddr(connIndex int, hasConnectivityError bool) (*allregions.EdgeAddr, error) {
log := ed.log.With().Int(LogFieldConnIndex, connIndex).Logger()
ed.Lock()
@ -106,7 +106,7 @@ func (ed *Edge) GetDifferentAddr(connIndex int) (*allregions.EdgeAddr, error) {
oldAddr := ed.regions.AddrUsedBy(connIndex)
if oldAddr != nil {
ed.regions.GiveBack(oldAddr)
ed.regions.GiveBack(oldAddr, hasConnectivityError)
}
addr := ed.regions.GetUnusedAddr(oldAddr, connIndex)
if addr == nil {
@ -114,7 +114,10 @@ func (ed *Edge) GetDifferentAddr(connIndex int) (*allregions.EdgeAddr, error) {
// note: if oldAddr were not nil, it will become available on the next iteration
return nil, errNoAddressesLeft
}
log.Debug().Msg("edgediscovery - GetDifferentAddr: Giving connection its new address")
log = ed.log.With().
Int(LogFieldConnIndex, connIndex).
IPAddr(LogFieldIPAddress, addr.UDP.IP).Logger()
log.Debug().Msgf("edgediscovery - GetDifferentAddr: Giving connection its new address from the address list: %v", ed.regions.AvailableAddrs())
return addr, nil
}
@ -127,9 +130,11 @@ func (ed *Edge) AvailableAddrs() int {
// GiveBack the address so that other connections can use it.
// Returns true if the address is in this edge.
func (ed *Edge) GiveBack(addr *allregions.EdgeAddr) bool {
func (ed *Edge) GiveBack(addr *allregions.EdgeAddr, hasConnectivityError bool) bool {
ed.Lock()
defer ed.Unlock()
ed.log.Debug().Msg("edgediscovery - GiveBack: Address now unused")
return ed.regions.GiveBack(addr)
log := ed.log.With().
IPAddr(LogFieldIPAddress, addr.UDP.IP).Logger()
log.Debug().Msgf("edgediscovery - GiveBack: Address now unused")
return ed.regions.GiveBack(addr, hasConnectivityError)
}
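The hunk above changes the Edge API in three visible ways: errNoAddressesLeft becomes the typed ErrNoAddressesLeft, ResolveEdge takes an allregions.ConfigIPVersion, and GetDifferentAddr/GiveBack gain a hasConnectivityError flag. A minimal sketch of how a caller might use these signatures follows; the dial logic, retry budget, and the region/IP-version arguments are assumptions for illustration, not the project's actual connection manager.

package main

import (
	"errors"
	"fmt"

	"github.com/cloudflare/cloudflared/edgediscovery"
	"github.com/cloudflare/cloudflared/edgediscovery/allregions"
	"github.com/rs/zerolog"
)

// dial stands in for the real tunnel connection attempt.
func dial(addr *allregions.EdgeAddr) error {
	return fmt.Errorf("dialing %v is not implemented in this sketch", addr.TCP)
}

// connectWithFallback keeps asking the Edge for a different address until a
// dial succeeds, the retry budget runs out, or no addresses are left.
func connectWithFallback(edge *edgediscovery.Edge, connIndex int) error {
	addr, err := edge.GetAddr(connIndex)
	if err != nil {
		return err
	}
	for attempt := 0; attempt < 3; attempt++ {
		if dialErr := dial(addr); dialErr == nil {
			return nil
		}
		// Report the failure as a connectivity error so the Edge can decide
		// how to treat the address that is being given back.
		addr, err = edge.GetDifferentAddr(connIndex, true)
		if errors.Is(err, edgediscovery.ErrNoAddressesLeft{}) {
			return fmt.Errorf("connection %d: %w", connIndex, err)
		}
		if err != nil {
			return err
		}
	}
	return fmt.Errorf("connection %d: retry budget exhausted", connIndex)
}

func main() {
	log := zerolog.Nop()
	edge, err := edgediscovery.ResolveEdge(&log, "", allregions.IPv4Only)
	if err != nil {
		panic(err)
	}
	fmt.Println(connectWithFallback(edge, 0))
}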


@ -11,56 +11,113 @@ import (
)
var (
addr0 = allregions.EdgeAddr{
testLogger = zerolog.Nop()
v4Addrs = []*allregions.EdgeAddr{&addr0, &addr1, &addr2, &addr3}
v6Addrs = []*allregions.EdgeAddr{&addr4, &addr5, &addr6, &addr7}
addr0 = allregions.EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("123.0.0.0"),
IP: net.ParseIP("123.4.5.0"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("123.0.0.0"),
IP: net.ParseIP("123.4.5.0"),
Port: 8000,
Zone: "",
},
IPVersion: allregions.V4,
}
addr1 = allregions.EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("123.0.0.1"),
IP: net.ParseIP("123.4.5.1"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("123.0.0.1"),
IP: net.ParseIP("123.4.5.1"),
Port: 8000,
Zone: "",
},
IPVersion: allregions.V4,
}
addr2 = allregions.EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("123.0.0.2"),
IP: net.ParseIP("123.4.5.2"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("123.0.0.2"),
IP: net.ParseIP("123.4.5.2"),
Port: 8000,
Zone: "",
},
IPVersion: allregions.V4,
}
addr3 = allregions.EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("123.0.0.3"),
IP: net.ParseIP("123.4.5.3"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("123.0.0.3"),
IP: net.ParseIP("123.4.5.3"),
Port: 8000,
Zone: "",
},
IPVersion: allregions.V4,
}
addr4 = allregions.EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("2606:4700:a0::1"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("2606:4700:a0::1"),
Port: 8000,
Zone: "",
},
IPVersion: allregions.V6,
}
addr5 = allregions.EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("2606:4700:a0::2"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("2606:4700:a0::2"),
Port: 8000,
Zone: "",
},
IPVersion: allregions.V6,
}
addr6 = allregions.EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("2606:4700:a0::3"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("2606:4700:a0::3"),
Port: 8000,
Zone: "",
},
IPVersion: allregions.V6,
}
addr7 = allregions.EdgeAddr{
TCP: &net.TCPAddr{
IP: net.ParseIP("2606:4700:a0::4"),
Port: 8000,
Zone: "",
},
UDP: &net.UDPAddr{
IP: net.ParseIP("2606:4700:a0::4"),
Port: 8000,
Zone: "",
},
IPVersion: allregions.V6,
}
testLogger = zerolog.Nop()
)
func TestGiveBack(t *testing.T) {
@ -75,7 +132,7 @@ func TestGiveBack(t *testing.T) {
assert.Equal(t, 3, edge.AvailableAddrs())
// Get it back
edge.GiveBack(addr)
edge.GiveBack(addr, false)
assert.Equal(t, 4, edge.AvailableAddrs())
}
@ -107,7 +164,7 @@ func TestGetAddrForRPC(t *testing.T) {
assert.Equal(t, 4, edge.AvailableAddrs())
// Get it back
edge.GiveBack(addr)
edge.GiveBack(addr, false)
assert.Equal(t, 4, edge.AvailableAddrs())
}
@ -122,13 +179,13 @@ func TestOnePerRegion(t *testing.T) {
assert.NotNil(t, a1)
// if the first address is bad, get the second one
a2, err := edge.GetDifferentAddr(connID)
a2, err := edge.GetDifferentAddr(connID, false)
assert.NoError(t, err)
assert.NotNil(t, a2)
assert.NotEqual(t, a1, a2)
// now that second one is bad, get the first one again
a3, err := edge.GetDifferentAddr(connID)
a3, err := edge.GetDifferentAddr(connID, false)
assert.NoError(t, err)
assert.Equal(t, a1, a3)
}
@ -144,11 +201,11 @@ func TestOnlyOneAddrLeft(t *testing.T) {
assert.NotNil(t, addr)
// If that edge address is "bad", there's no alternative address.
_, err = edge.GetDifferentAddr(connID)
_, err = edge.GetDifferentAddr(connID, false)
assert.Error(t, err)
// previously bad address should become available again on next iteration.
addr, err = edge.GetDifferentAddr(connID)
addr, err = edge.GetDifferentAddr(connID, false)
assert.NoError(t, err)
assert.NotNil(t, addr)
}
@ -190,8 +247,17 @@ func TestGetDifferentAddr(t *testing.T) {
assert.Equal(t, 3, edge.AvailableAddrs())
// If the same connection requests another address, it should get the same one.
addr2, err := edge.GetDifferentAddr(connID)
addr2, err := edge.GetDifferentAddr(connID, false)
assert.NoError(t, err)
assert.NotEqual(t, addr, addr2)
assert.Equal(t, 3, edge.AvailableAddrs())
}
// MockEdge creates a Cloudflare Edge from arbitrary TCP addresses. Used for testing.
func MockEdge(log *zerolog.Logger, addrs []*allregions.EdgeAddr) *Edge {
regions := allregions.NewNoResolve(addrs)
return &Edge{
log: log,
regions: regions,
}
}
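With MockEdge now defined in the test file, new tests in this package can construct an Edge directly from the fixtures above. Below is a sketch of an additional, hypothetical test that reuses v4Addrs and testLogger; it assumes it sits in the same file as those helpers (so no extra imports are needed) and its assertions simply mirror the existing ones.

func TestMockEdgeRotationSketch(t *testing.T) {
	edge := MockEdge(&testLogger, v4Addrs)
	assert.Equal(t, 4, edge.AvailableAddrs())

	const connID = 0
	first, err := edge.GetAddr(connID)
	assert.NoError(t, err)
	assert.NotNil(t, first)

	// Rotating to a different address hands the old one back first, so the
	// number of available addresses stays at 3 for this single connection.
	second, err := edge.GetDifferentAddr(connID, false)
	assert.NoError(t, err)
	assert.NotEqual(t, first, second)
	assert.Equal(t, 3, edge.AvailableAddrs())
}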


@ -2,4 +2,11 @@
package main
import _ "crypto/tls/fipsonly"
import (
_ "crypto/tls/fipsonly"
"github.com/cloudflare/cloudflared/cmd/cloudflared/tunnel"
)
func init () {
tunnel.FipsEnabled = true
}
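The fips.go change above follows a common Go pattern: a file compiled only for FIPS builds imports crypto/tls/fipsonly purely for its side effect and flips a package-level flag in init(). The hunk does not show the file's build constraint, so the following is a hypothetical, self-contained sketch of the same idea; the tag name, package, and flag are invented for illustration, and crypto/tls/fipsonly is only available in BoringCrypto-enabled toolchains.

//go:build fips

// Package features is a stand-in used only to illustrate the gated-init
// pattern; it is not part of cloudflared.
package features

import (
	// Imported for its side effect: restricts TLS to FIPS-approved parameters.
	_ "crypto/tls/fipsonly"
)

// Enabled is declared here to keep the sketch self-contained. In a real
// layout it would live in an always-compiled file with a false default and
// this gated file would only flip it, as fips.go does with tunnel.FipsEnabled.
var Enabled bool

func init() {
	Enabled = true
}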

github_message.py Normal file → Executable file

@ -1,29 +1,56 @@
#!/usr/bin/python3
"""
Creates Github Releases Notes with content hashes
Create GitHub release notes with binary checksums from Workers KV
"""
import argparse
import logging
import os
import hashlib
import glob
import requests
from github import Github, GithubException, UnknownObjectException
from github import Github, UnknownObjectException
FORMAT = "%(levelname)s - %(asctime)s: %(message)s"
logging.basicConfig(format=FORMAT)
logging.basicConfig(format=FORMAT, level=logging.INFO)
CLOUDFLARED_REPO = os.environ.get("GITHUB_REPO", "cloudflare/cloudflared")
GITHUB_CONFLICT_CODE = "already_exists"
BASE_KV_URL = 'https://api.cloudflare.com/client/v4/accounts/'
def get_sha256(filename):
""" get the sha256 of a file """
sha256_hash = hashlib.sha256()
with open(filename,"rb") as f:
for byte_block in iter(lambda: f.read(4096),b""):
sha256_hash.update(byte_block)
return sha256_hash.hexdigest()
def kv_get_keys(prefix, account, namespace, api_token):
""" get the KV keys for a given prefix """
response = requests.get(
BASE_KV_URL + account + "/storage/kv/namespaces/" +
namespace + "/keys" + "?prefix=" + prefix,
headers={
"Content-Type": "application/json",
"Authorization": "Bearer " + api_token,
},
)
if response.status_code != 200:
jsonResponse = response.json()
errors = jsonResponse["errors"]
if len(errors) > 0:
raise Exception("failed to get checksums: {0}".format(errors[0]))
return response.json()["result"]
def kv_get_value(key, account, namespace, api_token):
""" get the KV value for a provided key """
response = requests.get(
BASE_KV_URL + account + "/storage/kv/namespaces/" + namespace + "/values/" + key,
headers={
"Content-Type": "application/json",
"Authorization": "Bearer " + api_token,
},
)
if response.status_code != 200:
jsonResponse = response.json()
errors = jsonResponse["errors"]
if len(errors) > 0:
raise Exception("failed to get checksums: {0}".format(errors[0]))
return response.text
def update_or_add_message(msg, name, sha):
@ -42,6 +69,7 @@ def update_or_add_message(msg, name, sha):
return '{0}{1}```'.format(msg[:back], new_text)
return '{0} \n### SHA256 Checksums:\n```\n{1}```'.format(msg, new_text)
def get_release(repo, version):
""" Get a Github Release matching the version tag. """
try:
@ -55,11 +83,20 @@ def get_release(repo, version):
def parse_args():
""" Parse and validate args """
parser = argparse.ArgumentParser(
description="Creates Github Releases and uploads assets."
description="Updates a Github Release with checksums from KV"
)
parser.add_argument(
"--api-key", default=os.environ.get("API_KEY"), help="Github API key"
)
parser.add_argument(
"--kv-namespace-id", default=os.environ.get("KV_NAMESPACE"), help="workers KV namespace id"
)
parser.add_argument(
"--kv-account-id", default=os.environ.get("KV_ACCOUNT"), help="workers KV account id"
)
parser.add_argument(
"--kv-api-token", default=os.environ.get("KV_API_TOKEN"), help="workers KV API Token"
)
parser.add_argument(
"--release-version",
metavar="version",
@ -80,6 +117,18 @@ def parse_args():
logging.error("Missing API key")
is_valid = False
if not args.kv_namespace_id:
logging.error("Missing KV namespace id")
is_valid = False
if not args.kv_account_id:
logging.error("Missing KV account id")
is_valid = False
if not args.kv_api_token:
logging.error("Missing KV API token")
is_valid = False
if is_valid:
return args
@ -95,16 +144,20 @@ def main():
repo = client.get_repo(CLOUDFLARED_REPO)
release = get_release(repo, args.release_version)
msg = release.body
msg = ""
for filepath in glob.glob("artifacts/*"):
pkg_hash = get_sha256(filepath)
# add the sha256 of the new artifact to the release message body
name = os.path.basename(filepath)
msg = update_or_add_message(msg, name, pkg_hash)
prefix = f"update_{args.release_version}_"
keys = kv_get_keys(prefix, args.kv_account_id,
args.kv_namespace_id, args.kv_api_token)
for key in [k["name"] for k in keys]:
checksum = kv_get_value(
key, args.kv_account_id, args.kv_namespace_id, args.kv_api_token)
binary_name = key[len(prefix):]
msg = update_or_add_message(msg, binary_name, checksum)
if args.dry_run:
logging.info("Skipping asset upload because of dry-run")
logging.info("Skipping release message update because of dry-run")
logging.info(f"Github message:\n{msg}")
return
# update the release body text
@ -115,4 +168,4 @@ def main():
exit(1)
main()
main()
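For reference in the repository's primary language, here is a rough Go equivalent of the two Workers KV reads the script performs: list the keys under the update_<version>_ prefix, then fetch each key's value. The endpoint paths and the Bearer-token header come from the script above; the types, error handling, and function names are assumptions for this sketch and are not part of the release tooling.

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"net/http"
)

const baseKVURL = "https://api.cloudflare.com/client/v4/accounts/"

type kvKey struct {
	Name string `json:"name"`
}

type kvKeysResponse struct {
	Result []kvKey `json:"result"`
}

// kvGet performs one authenticated GET against the Workers KV REST API and
// returns the raw response body.
func kvGet(url, apiToken string) ([]byte, error) {
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer "+apiToken)
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		return nil, err
	}
	if resp.StatusCode != http.StatusOK {
		return nil, fmt.Errorf("KV request %s failed: %s", url, resp.Status)
	}
	return body, nil
}

// checksumsForRelease mirrors the script: keys are named update_<version>_<binary>
// and their values are the per-binary checksums.
func checksumsForRelease(account, namespace, apiToken, version string) (map[string]string, error) {
	prefix := fmt.Sprintf("update_%s_", version)
	raw, err := kvGet(baseKVURL+account+"/storage/kv/namespaces/"+namespace+"/keys?prefix="+prefix, apiToken)
	if err != nil {
		return nil, err
	}
	var keys kvKeysResponse
	if err := json.Unmarshal(raw, &keys); err != nil {
		return nil, err
	}
	checksums := make(map[string]string)
	for _, k := range keys.Result {
		value, err := kvGet(baseKVURL+account+"/storage/kv/namespaces/"+namespace+"/values/"+k.Name, apiToken)
		if err != nil {
			return nil, err
		}
		checksums[k.Name[len(prefix):]] = string(value)
	}
	return checksums, nil
}

func main() {
	// Usage sketch only; in CI the account, namespace, and token come from the environment.
	sums, err := checksumsForRelease("ACCOUNT_ID", "NAMESPACE_ID", "API_TOKEN", "2022.10.0")
	fmt.Println(sums, err)
}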

go.mod

@ -1,61 +1,50 @@
module github.com/cloudflare/cloudflared
go 1.17
go 1.18
require (
github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894 // indirect
github.com/cloudflare/brotli-go v0.0.0-20191101163834-d34379f7ff93
github.com/cloudflare/golibs v0.0.0-20170913112048-333127dbecfc
github.com/coredns/coredns v1.8.7
github.com/coreos/go-oidc v0.0.0-20171002155002-a93f71fdfe73
github.com/coreos/go-oidc/v3 v3.4.0
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect
github.com/facebookgo/freeport v0.0.0-20150612182905-d4adf43b75b9 // indirect
github.com/facebookgo/grace v0.0.0-20180706040059-75cf19382434
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect
github.com/fsnotify/fsnotify v1.4.9
github.com/gdamore/tcell v1.3.0
github.com/getsentry/raven-go v0.0.0-20180517221441-ed7bcb39ff10
github.com/gobwas/httphead v0.0.0-20200921212729-da3d93bc3c58 // indirect
github.com/gobwas/pool v0.2.1 // indirect
github.com/gobwas/ws v1.0.4
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
github.com/google/gopacket v1.1.19
github.com/google/uuid v1.3.0
github.com/gorilla/mux v1.8.0
github.com/gorilla/websocket v1.4.2
github.com/json-iterator/go v1.1.12
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/lucas-clemente/quic-go v0.24.0
github.com/lucas-clemente/quic-go v0.28.1
github.com/mattn/go-colorable v0.1.8
github.com/miekg/dns v1.1.45
github.com/mitchellh/go-homedir v1.1.0
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pkg/errors v0.9.1
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
github.com/prometheus/client_golang v1.11.0
github.com/prometheus/client_golang v1.12.1
github.com/prometheus/client_model v0.2.0
github.com/prometheus/common v0.32.1 // indirect
github.com/rivo/tview v0.0.0-20200712113419-c65badfc3d92
github.com/rs/zerolog v1.20.0
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/stretchr/testify v1.7.0
github.com/stretchr/testify v1.7.1
github.com/urfave/cli/v2 v2.3.0
go.opentelemetry.io/contrib/propagators v0.22.0
go.opentelemetry.io/otel v1.6.3
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3
go.opentelemetry.io/otel/sdk v1.6.3
go.opentelemetry.io/otel/trace v1.6.3
go.opentelemetry.io/proto/otlp v0.15.0
go.uber.org/automaxprocs v1.4.0
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 // indirect
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb // indirect
google.golang.org/grpc v1.43.0 // indirect
gopkg.in/coreos/go-oidc.v2 v2.1.0
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa
golang.org/x/net v0.0.0-20220909164309-bea034e7d591
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f
golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211
google.golang.org/protobuf v1.28.0
gopkg.in/coreos/go-oidc.v2 v2.2.1
gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/square/go-jose.v2 v2.4.0 // indirect
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
gopkg.in/square/go-jose.v2 v2.6.0
gopkg.in/yaml.v3 v3.0.1
zombiezen.com/go/capnproto2 v2.18.0+incompatible
)
@ -63,39 +52,69 @@ require (
github.com/BurntSushi/toml v0.3.1 // indirect
github.com/apparentlymart/go-cidr v1.1.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.1 // indirect
github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cheekybits/genny v1.0.0 // indirect
github.com/cloudflare/circl v1.2.1-0.20220809205628-0a9554f37a47 // indirect
github.com/coredns/caddy v1.1.1 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 // indirect
github.com/facebookgo/freeport v0.0.0-20150612182905-d4adf43b75b9 // indirect
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/francoispqt/gojay v1.2.13 // indirect
github.com/gdamore/encoding v1.0.0 // indirect
github.com/go-logr/logr v1.2.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
github.com/gobwas/httphead v0.0.0-20200921212729-da3d93bc3c58 // indirect
github.com/gobwas/pool v0.2.1 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/lucasb-eyer/go-colorful v1.0.3 // indirect
github.com/marten-seemann/qtls-go1-16 v0.1.4 // indirect
github.com/marten-seemann/qtls-go1-17 v0.1.0 // indirect
github.com/marten-seemann/qtls-go1-18 v0.1.0-beta.1 // indirect
github.com/kylelemons/godebug v1.1.0 // indirect
github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect
github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect
github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1 // indirect
github.com/mattn/go-isatty v0.0.12 // indirect
github.com/mattn/go-runewidth v0.0.8 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/nxadm/tail v1.4.8 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
github.com/opentracing/opentracing-go v1.2.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/procfs v0.6.0 // indirect
github.com/rivo/uniseg v0.1.0 // indirect
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
golang.org/x/mod v0.4.2 // indirect
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094 // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.27.1 // indirect
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90 // indirect
google.golang.org/grpc v1.47.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)
replace github.com/urfave/cli/v2 => github.com/ipostelnik/cli/v2 v2.3.1-0.20210324024421-b6ea8234fe3d
replace github.com/lucas-clemente/quic-go => github.com/chungthuang/quic-go v0.24.1-0.20220110095058-981dc498cb62
replace github.com/lucas-clemente/quic-go => github.com/chungthuang/quic-go v0.27.1-0.20220809135021-ca330f1dec9f
// Avoid 'CVE-2022-21698'
replace github.com/prometheus/golang_client => github.com/prometheus/golang_client v1.12.1
replace gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1
// Post-quantum tunnel RTG-1339
replace (
// branch go1.18
github.com/marten-seemann/qtls-go1-18 => github.com/cloudflare/qtls-pq v0.0.0-20221010110824-0053225e48b2
// branch go1.19
github.com/marten-seemann/qtls-go1-19 => github.com/cloudflare/qtls-pq v0.0.0-20221010110800-4f3769902fe0
)

go.sum

@ -28,14 +28,23 @@ cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+Y
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
cloud.google.com/go v0.102.0/go.mod h1:oWcCzKlqJ5zgHQt9YsaeTY9KzIvjyy0ArmiBUgpQ+nc=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
cloud.google.com/go/compute v1.7.0/go.mod h1:435lt8av5oL9P3fv1OEzSbSUe+ybHXGMPQHHZWZxy9U=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw=
cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA=
@ -45,6 +54,7 @@ cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0Zeo
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
@ -70,7 +80,6 @@ github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBp
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/DATA-DOG/go-sqlmock v1.3.3/go.mod h1:f/Ixk793poVmq4qj/V1dPUg2JEAKC73Q5eFN3EC/SaM=
github.com/DataDog/datadog-go v4.4.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ=
github.com/DataDog/gostackparse v0.5.0/go.mod h1:lTfqcJKqS9KnXQGnyQMCugq3u1FP6UZMfWR0aitKFMM=
github.com/DataDog/sketches-go v1.0.0/go.mod h1:O+XkJHWk9w4hDwY2ZUDU31ZC9sNYlYo8DiFsxjYeo1k=
@ -100,25 +109,33 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
github.com/bwesterb/go-ristretto v1.2.2/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894 h1:JLaf/iINcLyjwbtTsCJjc6rtlASgHeIJPrB6QmwURnA=
github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=
github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
github.com/chungthuang/quic-go v0.24.1-0.20220110095058-981dc498cb62 h1:PLTB4iA6sOgAItzQY642tYdcGKfG/7i2gu93JQGgUcM=
github.com/chungthuang/quic-go v0.24.1-0.20220110095058-981dc498cb62/go.mod h1:YtzP8bxRVCBlO77yRanE264+fY/T2U9ZlW1AaHOsMOg=
github.com/chungthuang/quic-go v0.27.1-0.20220809135021-ca330f1dec9f h1:UWC3XjwZzocdNCzzXxq9j/1SdHMZXhcTOsh/+gNRBUQ=
github.com/chungthuang/quic-go v0.27.1-0.20220809135021-ca330f1dec9f/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/cloudflare/brotli-go v0.0.0-20191101163834-d34379f7ff93 h1:QrGfkZDnMxcWHaYDdB7CmqS9i26OAnUj/xcus/abYkY=
github.com/cloudflare/brotli-go v0.0.0-20191101163834-d34379f7ff93/go.mod h1:QiTe66jFdP7cUKMCCf/WrvDyYdtdmdZfVcdoLbzaKVY=
github.com/cloudflare/circl v1.2.1-0.20220809205628-0a9554f37a47 h1:YzpECHxZ9TzO7LpnKmPxItSd79lLgrR5heIlnqU4dTU=
github.com/cloudflare/circl v1.2.1-0.20220809205628-0a9554f37a47/go.mod h1:qhx8gBILsYlbam7h09SvHDSkjpe3TfLA7b/z4rxJvkE=
github.com/cloudflare/golibs v0.0.0-20170913112048-333127dbecfc h1:Dvk3ySBsOm5EviLx6VCyILnafPcQinXGP5jbTdHUJgE=
github.com/cloudflare/golibs v0.0.0-20170913112048-333127dbecfc/go.mod h1:HlgKKR8V5a1wroIDDIz3/A+T+9Janfq+7n1P5sEFdi0=
github.com/cloudflare/qtls-pq v0.0.0-20221010110800-4f3769902fe0 h1:LEsjEHfKnIJEJU9QEIPVRuslxpBu+2kG2DXhxpkGT+o=
github.com/cloudflare/qtls-pq v0.0.0-20221010110800-4f3769902fe0/go.mod h1:aIsWqC0WXyUiUxBl/RfxAjDyWE9CCLqvSMnCMTd/+bc=
github.com/cloudflare/qtls-pq v0.0.0-20221010110824-0053225e48b2 h1:ErNoeVNqFXV+emlf4gY7Ms7/0DbQ8PT2UFxNyWBc51Q=
github.com/cloudflare/qtls-pq v0.0.0-20221010110824-0053225e48b2/go.mod h1:mW0BgKFFDAiSmOdUwoORtjo0V2vqw5QzVYRtKQqw/Jg=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
@ -126,13 +143,14 @@ github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XP
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/coredns/caddy v1.1.1 h1:2eYKZT7i6yxIfGP3qLJoJ7HAsDJqYB+X68g4NYjSrE0=
github.com/coredns/caddy v1.1.1/go.mod h1:A6ntJQlAWuQfFlsd9hvigKbo2WS0VUs2l1e2F+BawD4=
github.com/coredns/coredns v1.8.7 h1:wVMjAnyFnY7Mc18AFO+9qbGD6ODPtdVUIlzoWrHr3hk=
github.com/coredns/coredns v1.8.7/go.mod h1:bFmbgEfeRz5aizL2VsQ5LRlsvJuXWkgG/MWG9zxqjVM=
github.com/coreos/go-oidc v0.0.0-20171002155002-a93f71fdfe73 h1:7CNPV0LWRCa1FNmqg700pbXhzvmoaXKyfxWRkjRym7Q=
github.com/coreos/go-oidc v0.0.0-20171002155002-a93f71fdfe73/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
github.com/coreos/go-oidc/v3 v3.4.0 h1:xz7elHb/LDwm/ERpwHd+5nb7wFHL32rsr6bBOgaeu6g=
github.com/coreos/go-oidc/v3 v3.4.0/go.mod h1:eHUXhZtXPQLgEaDrOVTgwbgmz1xGOkJNye6h3zkD2Pw=
github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
@ -166,6 +184,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ=
@ -184,16 +203,11 @@ github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/francoispqt/gojay v1.2.13 h1:d2m3sFjloqoIUQU3TsHBgj6qg/BVGlTBeHDUmyJnXKk=
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gdamore/encoding v1.0.0 h1:+7OoQ1Bc6eTm5niUzBa0Ctsh6JbMW6Ra+YNuAtDBdko=
github.com/gdamore/encoding v1.0.0/go.mod h1:alR0ol34c49FCSBLjhosxzcPHQbf2trDkoo5dl+VrEg=
github.com/gdamore/tcell v1.3.0 h1:r35w0JBADPZCVQijYebl6YMWWtHRqVEGt7kL2eBADRM=
github.com/gdamore/tcell v1.3.0/go.mod h1:Hjvr+Ofd+gLglo7RYKxxnzCBmev3BzsS67MebKS4zMM=
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
github.com/getsentry/raven-go v0.0.0-20180517221441-ed7bcb39ff10 h1:YO10pIIBftO/kkTFdWhctH96grJ7qiy7bMdiZcIvPKs=
github.com/getsentry/raven-go v0.0.0-20180517221441-ed7bcb39ff10/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
@ -212,6 +226,11 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
@ -234,6 +253,8 @@ github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 h1:
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3/go.mod h1:nPpo7qLxd6XL3hWJG/O60sR8ZKfMCiIoNap5GvD12KU=
github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -284,13 +305,17 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@ -315,14 +340,19 @@ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2cUuW7uA/OeU=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg=
github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs=
@ -337,6 +367,8 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
@ -386,29 +418,20 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/lucasb-eyer/go-colorful v1.0.2/go.mod h1:0MS4r+7BZKSJ5mw4/S5MPN+qHFF1fYclkSPilDOKW0s=
github.com/lucasb-eyer/go-colorful v1.0.3 h1:QIbQXiugsb+q10B+MI+7DI1oQLdmnep86tWFlaaUAac=
github.com/lucasb-eyer/go-colorful v1.0.3/go.mod h1:R4dSotOR9KMtayYi1e77YzuveK+i7ruzyGqttikkLy0=
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ=
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I=
github.com/marten-seemann/qtls-go1-16 v0.1.4 h1:xbHbOGGhrenVtII6Co8akhLEdrawwB2iHl5yhJRpnco=
github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk=
github.com/marten-seemann/qtls-go1-17 v0.1.0 h1:P9ggrs5xtwiqXv/FHNwntmuLMNq3KaSIG93AtAZ48xk=
github.com/marten-seemann/qtls-go1-17 v0.1.0/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8=
github.com/marten-seemann/qtls-go1-18 v0.1.0-beta.1 h1:EnzzN9fPUkUck/1CuY1FlzBaIYMoiBsdwTNmNGkwUUM=
github.com/marten-seemann/qtls-go1-18 v0.1.0-beta.1/go.mod h1:PUhIQk19LoFt2174H4+an8TYvWOGjb/hHwphBeaDHwI=
github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ=
github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk=
github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ=
github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.8 h1:3tS41NlGYSmhhe/8fhGRzc+z3AYCw1Fe1WAyLuujKs0=
github.com/mattn/go-runewidth v0.0.8/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
@ -480,8 +503,9 @@ github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXP
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0 h1:HNkLOAEQMIDv/K+04rukrLx6ch7msSRwf3/SASFAGtQ=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
@ -497,21 +521,17 @@ github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0 h1:mxy4L2jP6qMonqmq+aTtOx1ifVWUgG/TAmntgbh3xv4=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/rabbitmq/amqp091-go v1.1.0/go.mod h1:ogQDLSOACsLPsIq0NpbtiifNZi2YOz0VTJ0kHRghqbM=
github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4=
github.com/rivo/tview v0.0.0-20200712113419-c65badfc3d92 h1:rqaqSUdaW+OBbjnsrOoiaJv43mSRARuvsAuirmdxu7E=
github.com/rivo/tview v0.0.0-20200712113419-c65badfc3d92/go.mod h1:6lkG1x+13OShEf0EaOCaTQYyB7d5nSbb181KtjlS+84=
github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.20.0 h1:38k9hgtUBdxFwE34yS8rTHmHBa4eN16E4DJlv177LNs=
github.com/rs/zerolog v1.20.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo=
github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
@ -560,8 +580,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ=
github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
@ -586,7 +607,22 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/contrib/propagators v0.22.0 h1:KGdv58M2//veiYLIhb31mofaI2LgkIPXXAZVeYVyfd8=
go.opentelemetry.io/contrib/propagators v0.22.0/go.mod h1:xGOuXr6lLIF9BXipA4pm6UuOSI0M98U6tsI3khbOiwU=
go.opentelemetry.io/otel v1.0.0-RC2/go.mod h1:w1thVQ7qbAy8MHb0IFj8a5Q2QU0l2ksf8u/CN8m3NOM=
go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE=
go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI=
go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.3/go.mod h1:NEu79Xo32iVb+0gVNV8PMd7GoWqnyDXRlj04yFjqz40=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3 h1:4/UjHWMVVc5VwX/KAtqJOHErKigMCH8NexChMuanb/o=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3/go.mod h1:UJmXdiVVBaZ63umRUTwJuCMAV//GCMvDiQwn703/GoY=
go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs=
go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ=
go.opentelemetry.io/otel/trace v1.0.0-RC2/go.mod h1:JPQ+z6nNw9mqEGT8o3eoPTdnNI+Aj5JcxEsVGREIAy4=
go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc=
go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v0.15.0 h1:h0bKrvdrT/9sBwEJ6iWUqT/N/xPcS66bL4u3isneJ6w=
go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0=
go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
@ -607,8 +643,9 @@ golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201112155050-0c6587e931a9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210920023735-84f357641f63/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa h1:zuSxTR4o9y82ebqCUJYNGJbGPo6sKVl54f/TVDObg1c=
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -694,10 +731,19 @@ golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210917221730-978cfadd31cf/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d h1:1n1fc535VhN8SYtD4cDUyNlfpAF2ROMM9+11equK3hs=
golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220607020251-c690dde0001d/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220624214902-1bab6f366d9e/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.0.0-20220909164309-bea034e7d591 h1:D0B/7al0LLrVC8aWF4+oxpv/m8bc7ViFfVS8/gXGdqI=
golang.org/x/net v0.0.0-20220909164309-bea034e7d591/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
@ -715,8 +761,13 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8 h1:RerP+noqYHUQ8CMRcPlC2nvTa4dcBIjegkuWdcUDuqg=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7LmZnYtzbRWj0W47D86a3TGe0YHBvmE=
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094 h1:2o1E+E8TpNLklK9nHiPiK1uzIYrIHt+cQx3ynCwq9V8=
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -728,8 +779,9 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8=
golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -744,7 +796,6 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190626150813-e07cf5db2756/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -788,6 +839,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@ -800,11 +852,25 @@ golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e h1:fLOSk5Q00efkSvAm+4xcoXD+RRmLmmulPn5I3Y9F2EM=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220610221304-9f5ed59c137d/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664 h1:v1W7bwXHsnLLloWYTVEdvGvA7BHMeBYsPcF0GLDxIRs=
golang.org/x/sys v0.0.0-20220808155132-1c4a2a72c664/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -884,8 +950,11 @@ golang.org/x/tools v0.1.6-0.20210726203631-07bc1bf47fb2/go.mod h1:o0xws9oXOQQZyj
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f h1:uF6paiQQebLeSXkrTqHqz0MXhXXS1KgF41eUdBNvxK0=
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
@ -919,7 +988,16 @@ google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqiv
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
google.golang.org/api v0.64.0/go.mod h1:931CdxA8Rm4t6zqTFGSsgwbAEZ2+GMYurbndwSimebM=
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -974,6 +1052,7 @@ google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210329143202-679c6ae281ee/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
@ -993,8 +1072,27 @@ google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEc
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb h1:ZrsicilzPCS/Xr8qtBZZLpy4P9TYXAfl49ctG1/5tgw=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220518221133-4f43b3371335/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220523171625-347a074981d8/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90 h1:4SPz2GL2CXJt28MTF8V6Ap/9ZiVbQlJeGSd9qtA7DLs=
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
@ -1027,8 +1125,14 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.47.0 h1:9n77onPX5F3qfFCqjy9dhn8PbNQsIKeVU04J9G7umt8=
google.golang.org/grpc v1.47.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@ -1042,8 +1146,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/DataDog/dd-trace-go.v1 v1.34.0/go.mod h1:HtrC65fyJ6lWazShCC9rlOeiTSZJ0XtZhkwjZM2WpC4=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@ -1052,15 +1157,15 @@ gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8
gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/coreos/go-oidc.v2 v2.1.0 h1:E8PjVFdj/SLDKB0hvb70KTbMbYVHjqztiQdSkIg8E+I=
gopkg.in/coreos/go-oidc.v2 v2.1.0/go.mod h1:fYaTe2FS96wZZwR17YTDHwG+Mw6fmyqJNxN2eNCGPCI=
gopkg.in/coreos/go-oidc.v2 v2.2.1 h1:MY5SZClJ7vhjKfr64a4nHAOV/c3WH2gB9BMrR64J1Mc=
gopkg.in/coreos/go-oidc.v2 v2.2.1/go.mod h1:fYaTe2FS96wZZwR17YTDHwG+Mw6fmyqJNxN2eNCGPCI=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/square/go-jose.v2 v2.4.0 h1:0kXPskUMGAXXWJlP05ktEMOV0vmzFQUWw6d+aZJQU8A=
gopkg.in/square/go-jose.v2 v2.4.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@ -1072,10 +1177,8 @@ gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=

@ -11,14 +11,17 @@ import (
"github.com/cloudflare/cloudflared/tlsconfig"
)
const (
defaultConnectTimeout = 30 * time.Second
defaultTLSTimeout = 10 * time.Second
defaultTCPKeepAlive = 30 * time.Second
defaultKeepAliveConnections = 100
defaultKeepAliveTimeout = 90 * time.Second
defaultProxyAddress = "127.0.0.1"
var (
defaultHTTPConnectTimeout = config.CustomDuration{Duration: 30 * time.Second}
defaultWarpRoutingConnectTimeout = config.CustomDuration{Duration: 5 * time.Second}
defaultTLSTimeout = config.CustomDuration{Duration: 10 * time.Second}
defaultTCPKeepAlive = config.CustomDuration{Duration: 30 * time.Second}
defaultKeepAliveTimeout = config.CustomDuration{Duration: 90 * time.Second}
)
const (
defaultProxyAddress = "127.0.0.1"
defaultKeepAliveConnections = 100
SSHServerFlag = "ssh-server"
Socks5Flag = "socks5"
ProxyConnectTimeoutFlag = "proxy-connect-timeout"
@ -33,47 +36,90 @@ const (
NoChunkedEncodingFlag = "no-chunked-encoding"
ProxyAddressFlag = "proxy-address"
ProxyPortFlag = "proxy-port"
Http2OriginFlag = "http2-origin"
)
const (
socksProxy = "socks"
)
type WarpRoutingConfig struct {
Enabled bool `yaml:"enabled" json:"enabled"`
ConnectTimeout config.CustomDuration `yaml:"connectTimeout" json:"connectTimeout,omitempty"`
TCPKeepAlive config.CustomDuration `yaml:"tcpKeepAlive" json:"tcpKeepAlive,omitempty"`
}
func NewWarpRoutingConfig(raw *config.WarpRoutingConfig) WarpRoutingConfig {
cfg := WarpRoutingConfig{
Enabled: raw.Enabled,
ConnectTimeout: defaultWarpRoutingConnectTimeout,
TCPKeepAlive: defaultTCPKeepAlive,
}
if raw.ConnectTimeout != nil {
cfg.ConnectTimeout = *raw.ConnectTimeout
}
if raw.TCPKeepAlive != nil {
cfg.TCPKeepAlive = *raw.TCPKeepAlive
}
return cfg
}
func (c *WarpRoutingConfig) RawConfig() config.WarpRoutingConfig {
raw := config.WarpRoutingConfig{
Enabled: c.Enabled,
}
if c.ConnectTimeout.Duration != defaultWarpRoutingConnectTimeout.Duration {
raw.ConnectTimeout = &c.ConnectTimeout
}
if c.TCPKeepAlive.Duration != defaultTCPKeepAlive.Duration {
raw.TCPKeepAlive = &c.TCPKeepAlive
}
return raw
}
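// Illustrative sketch (not part of this diff): how the conversion above is expected to behave.
// Remote payloads may omit the optional timeouts; NewWarpRoutingConfig fills in the defaults
// (5s connect timeout, 30s TCP keepalive), and RawConfig drops them again so only non-default
// values are persisted. Assumes it sits in this package so the identifiers above are in scope.
func exampleWarpRoutingDefaults() config.WarpRoutingConfig {
	raw := config.WarpRoutingConfig{Enabled: true} // ConnectTimeout and TCPKeepAlive left nil
	cfg := NewWarpRoutingConfig(&raw)
	// cfg.ConnectTimeout is now the 5s default and cfg.TCPKeepAlive the 30s default.
	return cfg.RawConfig() // both timeouts are nil again because they match the defaults
}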
// RemoteConfig models ingress settings that can be managed remotely, for example through the dashboard.
type RemoteConfig struct {
Ingress Ingress
WarpRouting config.WarpRoutingConfig
WarpRouting WarpRoutingConfig
}
type remoteConfigJSON struct {
GlobalOriginRequest config.OriginRequestConfig `json:"originRequest"`
type RemoteConfigJSON struct {
GlobalOriginRequest *config.OriginRequestConfig `json:"originRequest,omitempty"`
IngressRules []config.UnvalidatedIngressRule `json:"ingress"`
WarpRouting config.WarpRoutingConfig `json:"warp-routing"`
}
func (rc *RemoteConfig) UnmarshalJSON(b []byte) error {
var rawConfig remoteConfigJSON
var rawConfig RemoteConfigJSON
if err := json.Unmarshal(b, &rawConfig); err != nil {
return err
}
ingress, err := validateIngress(rawConfig.IngressRules, originRequestFromConfig(rawConfig.GlobalOriginRequest))
// if nil, just assume the default values.
globalOriginRequestConfig := rawConfig.GlobalOriginRequest
if globalOriginRequestConfig == nil {
globalOriginRequestConfig = &config.OriginRequestConfig{}
}
ingress, err := validateIngress(rawConfig.IngressRules, originRequestFromConfig(*globalOriginRequestConfig))
if err != nil {
return err
}
rc.Ingress = ingress
rc.WarpRouting = rawConfig.WarpRouting
rc.WarpRouting = NewWarpRoutingConfig(&rawConfig.WarpRouting)
return nil
}
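// Illustrative sketch (not part of this diff): decoding a remote configuration payload.
// A missing "originRequest" object falls back to an empty config.OriginRequestConfig, so the
// ingress rules inherit cloudflared's defaults. Assumes package scope as above.
func exampleDecodeRemoteConfig(payload []byte) (*RemoteConfig, error) {
	var rc RemoteConfig
	// json.Unmarshal dispatches to the UnmarshalJSON method defined above.
	if err := json.Unmarshal(payload, &rc); err != nil {
		return nil, err
	}
	return &rc, nil
}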
func originRequestFromSingeRule(c *cli.Context) OriginRequestConfig {
var connectTimeout time.Duration = defaultConnectTimeout
var tlsTimeout time.Duration = defaultTLSTimeout
var tcpKeepAlive time.Duration = defaultTCPKeepAlive
var connectTimeout = defaultHTTPConnectTimeout
var tlsTimeout = defaultTLSTimeout
var tcpKeepAlive = defaultTCPKeepAlive
var noHappyEyeballs bool
var keepAliveConnections int = defaultKeepAliveConnections
var keepAliveTimeout time.Duration = defaultKeepAliveTimeout
var keepAliveConnections = defaultKeepAliveConnections
var keepAliveTimeout = defaultKeepAliveTimeout
var httpHostHeader string
var originServerName string
var caPool string
@ -83,14 +129,15 @@ func originRequestFromSingeRule(c *cli.Context) OriginRequestConfig {
var proxyAddress = defaultProxyAddress
var proxyPort uint
var proxyType string
var http2Origin bool
if flag := ProxyConnectTimeoutFlag; c.IsSet(flag) {
connectTimeout = c.Duration(flag)
connectTimeout = config.CustomDuration{Duration: c.Duration(flag)}
}
if flag := ProxyTLSTimeoutFlag; c.IsSet(flag) {
tlsTimeout = c.Duration(flag)
tlsTimeout = config.CustomDuration{Duration: c.Duration(flag)}
}
if flag := ProxyTCPKeepAliveFlag; c.IsSet(flag) {
tcpKeepAlive = c.Duration(flag)
tcpKeepAlive = config.CustomDuration{Duration: c.Duration(flag)}
}
if flag := ProxyNoHappyEyeballsFlag; c.IsSet(flag) {
noHappyEyeballs = c.Bool(flag)
@ -99,7 +146,7 @@ func originRequestFromSingeRule(c *cli.Context) OriginRequestConfig {
keepAliveConnections = c.Int(flag)
}
if flag := ProxyKeepAliveTimeoutFlag; c.IsSet(flag) {
keepAliveTimeout = c.Duration(flag)
keepAliveTimeout = config.CustomDuration{Duration: c.Duration(flag)}
}
if flag := HTTPHostHeaderFlag; c.IsSet(flag) {
httpHostHeader = c.String(flag)
@ -126,9 +173,13 @@ func originRequestFromSingeRule(c *cli.Context) OriginRequestConfig {
// Note TUN-3758: we use Int because Uint is not supported with altsrc
proxyPort = uint(c.Int(flag))
}
if flag := Http2OriginFlag; c.IsSet(flag) {
http2Origin = c.Bool(flag)
}
if c.IsSet(Socks5Flag) {
proxyType = socksProxy
}
return OriginRequestConfig{
ConnectTimeout: connectTimeout,
TLSTimeout: tlsTimeout,
@ -145,12 +196,13 @@ func originRequestFromSingeRule(c *cli.Context) OriginRequestConfig {
ProxyAddress: proxyAddress,
ProxyPort: proxyPort,
ProxyType: proxyType,
Http2Origin: http2Origin,
}
}
func originRequestFromConfig(c config.OriginRequestConfig) OriginRequestConfig {
out := OriginRequestConfig{
ConnectTimeout: defaultConnectTimeout,
ConnectTimeout: defaultHTTPConnectTimeout,
TLSTimeout: defaultTLSTimeout,
TCPKeepAlive: defaultTCPKeepAlive,
KeepAliveConnections: defaultKeepAliveConnections,
@ -210,6 +262,12 @@ func originRequestFromConfig(c config.OriginRequestConfig) OriginRequestConfig {
}
}
}
if c.Http2Origin != nil {
out.Http2Origin = *c.Http2Origin
}
if c.Access != nil {
out.Access = *c.Access
}
return out
}
@ -218,41 +276,46 @@ func originRequestFromConfig(c config.OriginRequestConfig) OriginRequestConfig {
// Note: To specify a time.Duration in go-yaml, use e.g. "3s" or "24h".
type OriginRequestConfig struct {
// HTTP proxy timeout for establishing a new connection
ConnectTimeout time.Duration `yaml:"connectTimeout"`
ConnectTimeout config.CustomDuration `yaml:"connectTimeout" json:"connectTimeout"`
// HTTP proxy timeout for completing a TLS handshake
TLSTimeout time.Duration `yaml:"tlsTimeout"`
TLSTimeout config.CustomDuration `yaml:"tlsTimeout" json:"tlsTimeout"`
// HTTP proxy TCP keepalive duration
TCPKeepAlive time.Duration `yaml:"tcpKeepAlive"`
TCPKeepAlive config.CustomDuration `yaml:"tcpKeepAlive" json:"tcpKeepAlive"`
// HTTP proxy should disable "happy eyeballs" for IPv4/v6 fallback
NoHappyEyeballs bool `yaml:"noHappyEyeballs"`
NoHappyEyeballs bool `yaml:"noHappyEyeballs" json:"noHappyEyeballs"`
// HTTP proxy timeout for closing an idle connection
KeepAliveTimeout time.Duration `yaml:"keepAliveTimeout"`
KeepAliveTimeout config.CustomDuration `yaml:"keepAliveTimeout" json:"keepAliveTimeout"`
// HTTP proxy maximum keepalive connection pool size
KeepAliveConnections int `yaml:"keepAliveConnections"`
KeepAliveConnections int `yaml:"keepAliveConnections" json:"keepAliveConnections"`
// Sets the HTTP Host header for the local webserver.
HTTPHostHeader string `yaml:"httpHostHeader"`
HTTPHostHeader string `yaml:"httpHostHeader" json:"httpHostHeader"`
// Hostname on the origin server certificate.
OriginServerName string `yaml:"originServerName"`
OriginServerName string `yaml:"originServerName" json:"originServerName"`
// Path to the CA for the certificate of your origin.
// This option should be used only if your certificate is not signed by Cloudflare.
CAPool string `yaml:"caPool"`
CAPool string `yaml:"caPool" json:"caPool"`
// Disables TLS verification of the certificate presented by your origin.
// Will allow any certificate from the origin to be accepted.
// Note: The connection from your machine to Cloudflare's Edge is still encrypted.
NoTLSVerify bool `yaml:"noTLSVerify"`
NoTLSVerify bool `yaml:"noTLSVerify" json:"noTLSVerify"`
// Disables chunked transfer encoding.
// Useful if you are running a WSGI server.
DisableChunkedEncoding bool `yaml:"disableChunkedEncoding"`
DisableChunkedEncoding bool `yaml:"disableChunkedEncoding" json:"disableChunkedEncoding"`
// Runs as jump host
BastionMode bool `yaml:"bastionMode"`
BastionMode bool `yaml:"bastionMode" json:"bastionMode"`
// Listen address for the proxy.
ProxyAddress string `yaml:"proxyAddress"`
ProxyAddress string `yaml:"proxyAddress" json:"proxyAddress"`
// Listen port for the proxy.
ProxyPort uint `yaml:"proxyPort"`
ProxyPort uint `yaml:"proxyPort" json:"proxyPort"`
// What sort of proxy should be started
ProxyType string `yaml:"proxyType"`
ProxyType string `yaml:"proxyType" json:"proxyType"`
// IP rules for the proxy service
IPRules []ipaccess.Rule `yaml:"ipRules"`
IPRules []ipaccess.Rule `yaml:"ipRules" json:"ipRules"`
// Attempt to connect to origin with HTTP/2
Http2Origin bool `yaml:"http2Origin" json:"http2Origin"`
// Access holds all access related configs
Access config.AccessConfig `yaml:"access" json:"access,omitempty"`
}
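// Illustrative sketch (not part of this diff): the yaml tags above accept human-readable
// durations such as "30s" or "2m" (see the note on go-yaml durations at the top of the struct).
// Assumes gopkg.in/yaml.v3 is imported, as in the tests below, and that config.CustomDuration
// parses duration strings as those tests exercise.
func exampleUnmarshalOriginRequestYAML() (OriginRequestConfig, error) {
	raw := []byte("connectTimeout: 30s\ntlsTimeout: 10s\nhttp2Origin: true\n")
	var cfg OriginRequestConfig
	err := yaml.Unmarshal(raw, &cfg)
	return cfg, err
}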
func (defaults *OriginRequestConfig) setConnectTimeout(overrides config.OriginRequestConfig) {
@ -358,6 +421,12 @@ func (defaults *OriginRequestConfig) setIPRules(overrides config.OriginRequestCo
}
}
func (defaults *OriginRequestConfig) setHttp2Origin(overrides config.OriginRequestConfig) {
if val := overrides.Http2Origin; val != nil {
defaults.Http2Origin = *val
}
}
// SetConfig gets config for the requests that cloudflared sends to origins.
// Each field has a setter method which sets a value for the field by trying to find:
// 1. The user config for this rule
@ -383,5 +452,95 @@ func setConfig(defaults OriginRequestConfig, overrides config.OriginRequestConfi
cfg.setProxyAddress(overrides)
cfg.setProxyType(overrides)
cfg.setIPRules(overrides)
cfg.setHttp2Origin(overrides)
return cfg
}
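// ConvertToRawOriginConfig translates an OriginRequestConfig back into the raw
// config.OriginRequestConfig form, emitting only values that differ from cloudflared's
// defaults so that serialized configuration stays minimal.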
func ConvertToRawOriginConfig(c OriginRequestConfig) config.OriginRequestConfig {
var connectTimeout *config.CustomDuration
var tlsTimeout *config.CustomDuration
var tcpKeepAlive *config.CustomDuration
var keepAliveConnections *int
var keepAliveTimeout *config.CustomDuration
var proxyAddress *string
if c.ConnectTimeout != defaultHTTPConnectTimeout {
connectTimeout = &c.ConnectTimeout
}
if c.TLSTimeout != defaultTLSTimeout {
tlsTimeout = &c.TLSTimeout
}
if c.TCPKeepAlive != defaultTCPKeepAlive {
tcpKeepAlive = &c.TCPKeepAlive
}
if c.KeepAliveConnections != defaultKeepAliveConnections {
keepAliveConnections = &c.KeepAliveConnections
}
if c.KeepAliveTimeout != defaultKeepAliveTimeout {
keepAliveTimeout = &c.KeepAliveTimeout
}
if c.ProxyAddress != defaultProxyAddress {
proxyAddress = &c.ProxyAddress
}
return config.OriginRequestConfig{
ConnectTimeout: connectTimeout,
TLSTimeout: tlsTimeout,
TCPKeepAlive: tcpKeepAlive,
NoHappyEyeballs: defaultBoolToNil(c.NoHappyEyeballs),
KeepAliveConnections: keepAliveConnections,
KeepAliveTimeout: keepAliveTimeout,
HTTPHostHeader: emptyStringToNil(c.HTTPHostHeader),
OriginServerName: emptyStringToNil(c.OriginServerName),
CAPool: emptyStringToNil(c.CAPool),
NoTLSVerify: defaultBoolToNil(c.NoTLSVerify),
DisableChunkedEncoding: defaultBoolToNil(c.DisableChunkedEncoding),
BastionMode: defaultBoolToNil(c.BastionMode),
ProxyAddress: proxyAddress,
ProxyPort: zeroUIntToNil(c.ProxyPort),
ProxyType: emptyStringToNil(c.ProxyType),
IPRules: convertToRawIPRules(c.IPRules),
Http2Origin: defaultBoolToNil(c.Http2Origin),
}
}
func convertToRawIPRules(ipRules []ipaccess.Rule) []config.IngressIPRule {
result := make([]config.IngressIPRule, 0)
for _, r := range ipRules {
cidr := r.StringCIDR()
newRule := config.IngressIPRule{
Prefix: &cidr,
Ports: r.Ports(),
Allow: r.RulePolicy(),
}
result = append(result, newRule)
}
return result
}
func defaultBoolToNil(b bool) *bool {
if !b {
return nil
}
return &b
}
func emptyStringToNil(s string) *string {
if s == "" {
return nil
}
return &s
}
func zeroUIntToNil(v uint) *uint {
if v == 0 {
return nil
}
return &v
}
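// Illustrative sketch (not part of this diff): round-tripping the defaults through the helpers
// above. Every field of a default OriginRequestConfig converts to a nil pointer (or an empty
// slice for IPRules), so the resulting raw config serializes to almost nothing.
func exampleConvertDefaults() config.OriginRequestConfig {
	cfg := originRequestFromConfig(config.OriginRequestConfig{}) // all defaults
	return ConvertToRawOriginConfig(cfg)                         // pointer fields are nil here
}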


@ -8,7 +8,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/urfave/cli/v2"
yaml "gopkg.in/yaml.v2"
yaml "gopkg.in/yaml.v3"
"github.com/cloudflare/cloudflared/config"
"github.com/cloudflare/cloudflared/ipaccess"
@ -65,7 +65,7 @@ func TestUnmarshalRemoteConfigOverridesGlobal(t *testing.T) {
err := json.Unmarshal(rawConfig, &remoteConfig)
require.NoError(t, err)
require.True(t, remoteConfig.Ingress.Rules[0].Config.NoTLSVerify)
require.True(t, remoteConfig.Ingress.defaults.NoHappyEyeballs)
require.True(t, remoteConfig.Ingress.Defaults.NoHappyEyeballs)
}
func TestOriginRequestConfigOverrides(t *testing.T) {
@ -74,11 +74,11 @@ func TestOriginRequestConfigOverrides(t *testing.T) {
// root-level configuration.
actual0 := ing.Rules[0].Config
expected0 := OriginRequestConfig{
ConnectTimeout: 1 * time.Minute,
TLSTimeout: 1 * time.Second,
TCPKeepAlive: 1 * time.Second,
ConnectTimeout: config.CustomDuration{Duration: 1 * time.Minute},
TLSTimeout: config.CustomDuration{Duration: 1 * time.Second},
TCPKeepAlive: config.CustomDuration{Duration: 1 * time.Second},
NoHappyEyeballs: true,
KeepAliveTimeout: 1 * time.Second,
KeepAliveTimeout: config.CustomDuration{Duration: 1 * time.Second},
KeepAliveConnections: 1,
HTTPHostHeader: "abc",
OriginServerName: "a1",
@ -99,11 +99,11 @@ func TestOriginRequestConfigOverrides(t *testing.T) {
// Rule 1 overrode all the root-level config.
actual1 := ing.Rules[1].Config
expected1 := OriginRequestConfig{
ConnectTimeout: 2 * time.Minute,
TLSTimeout: 2 * time.Second,
TCPKeepAlive: 2 * time.Second,
ConnectTimeout: config.CustomDuration{Duration: 2 * time.Minute},
TLSTimeout: config.CustomDuration{Duration: 2 * time.Second},
TCPKeepAlive: config.CustomDuration{Duration: 2 * time.Second},
NoHappyEyeballs: false,
KeepAliveTimeout: 2 * time.Second,
KeepAliveTimeout: config.CustomDuration{Duration: 2 * time.Second},
KeepAliveConnections: 2,
HTTPHostHeader: "def",
OriginServerName: "b2",
@ -191,12 +191,12 @@ ingress:
rawConfig := []byte(`
{
"originRequest": {
"connectTimeout": 60000000000,
"tlsTimeout": 1000000000,
"connectTimeout": 60,
"tlsTimeout": 1,
"noHappyEyeballs": true,
"tcpKeepAlive": 1000000000,
"tcpKeepAlive": 1,
"keepAliveConnections": 1,
"keepAliveTimeout": 1000000000,
"keepAliveTimeout": 1,
"httpHostHeader": "abc",
"originServerName": "a1",
"caPool": "/tmp/path0",
@ -228,12 +228,12 @@ ingress:
"hostname": "*",
"service": "https://localhost:8001",
"originRequest": {
"connectTimeout": 120000000000,
"tlsTimeout": 2000000000,
"connectTimeout": 120,
"tlsTimeout": 2,
"noHappyEyeballs": false,
"tcpKeepAlive": 2000000000,
"tcpKeepAlive": 2,
"keepAliveConnections": 2,
"keepAliveTimeout": 2000000000,
"keepAliveTimeout": 2,
"httpHostHeader": "def",
"originServerName": "b2",
"caPool": "/tmp/path1",
@ -274,7 +274,7 @@ func TestOriginRequestConfigDefaults(t *testing.T) {
// Rule 0 didn't override anything, so it inherits the cloudflared defaults
actual0 := ing.Rules[0].Config
expected0 := OriginRequestConfig{
ConnectTimeout: defaultConnectTimeout,
ConnectTimeout: defaultHTTPConnectTimeout,
TLSTimeout: defaultTLSTimeout,
TCPKeepAlive: defaultTCPKeepAlive,
KeepAliveConnections: defaultKeepAliveConnections,
@ -286,11 +286,11 @@ func TestOriginRequestConfigDefaults(t *testing.T) {
// Rule 1 overrode all defaults.
actual1 := ing.Rules[1].Config
expected1 := OriginRequestConfig{
ConnectTimeout: 2 * time.Minute,
TLSTimeout: 2 * time.Second,
TCPKeepAlive: 2 * time.Second,
ConnectTimeout: config.CustomDuration{Duration: 2 * time.Minute},
TLSTimeout: config.CustomDuration{Duration: 2 * time.Second},
TCPKeepAlive: config.CustomDuration{Duration: 2 * time.Second},
NoHappyEyeballs: false,
KeepAliveTimeout: 2 * time.Second,
KeepAliveTimeout: config.CustomDuration{Duration: 2 * time.Second},
KeepAliveConnections: 2,
HTTPHostHeader: "def",
OriginServerName: "b2",
@ -360,12 +360,12 @@ ingress:
"hostname": "*",
"service": "https://localhost:8001",
"originRequest": {
"connectTimeout": 120000000000,
"tlsTimeout": 2000000000,
"connectTimeout": 120,
"tlsTimeout": 2,
"noHappyEyeballs": false,
"tcpKeepAlive": 2000000000,
"tcpKeepAlive": 2,
"keepAliveConnections": 2,
"keepAliveTimeout": 2000000000,
"keepAliveTimeout": 2,
"httpHostHeader": "def",
"originServerName": "b2",
"caPool": "/tmp/path1",
@ -404,7 +404,7 @@ func TestDefaultConfigFromCLI(t *testing.T) {
c := cli.NewContext(nil, set, nil)
expected := OriginRequestConfig{
ConnectTimeout: defaultConnectTimeout,
ConnectTimeout: defaultHTTPConnectTimeout,
TLSTimeout: defaultTLSTimeout,
TCPKeepAlive: defaultTCPKeepAlive,
KeepAliveConnections: defaultKeepAliveConnections,

ingress/icmp_darwin.go Normal file

@ -0,0 +1,272 @@
//go:build darwin
package ingress
// This file implements ICMPProxy for Darwin. It uses a non-privileged ICMP socket to send echo requests and listen for
// echo replies. The source IP of the requests is rewritten to the bind IP of the socket, and the socket reads all
// messages, so we use the echo ID to distinguish replies. Each (source IP, destination IP, original echo ID) tuple is
// assigned a unique echo ID.
import (
"context"
"fmt"
"math"
"net"
"net/netip"
"strconv"
"sync"
"time"
"github.com/rs/zerolog"
"golang.org/x/net/icmp"
"github.com/cloudflare/cloudflared/packet"
)
type icmpProxy struct {
srcFunnelTracker *packet.FunnelTracker
echoIDTracker *echoIDTracker
conn *icmp.PacketConn
// Responses are handled one by one, so the encoder can be shared between funnels
encoder *packet.Encoder
logger *zerolog.Logger
idleTimeout time.Duration
}
// echoIDTracker tracks which echo IDs have been assigned. It first scans for a free ID from nextAssignment to the end,
// then from the beginning to nextAssignment.
// ICMP echoes are short lived. By the time an ID is revisited, it should have been released.
type echoIDTracker struct {
lock sync.Mutex
// maps the source IP, destination IP and original echo ID to a unique echo ID obtained from assignment
mapping map[flow3Tuple]uint16
// assignment tracks if an ID is assigned using index as the ID
// The size of the array is math.MaxUint16 because echo ID is 2 bytes
assignment [math.MaxUint16]bool
// nextAssignment is the next number to check for assignment
nextAssignment uint16
}
func newEchoIDTracker() *echoIDTracker {
return &echoIDTracker{
mapping: make(map[flow3Tuple]uint16),
}
}
// Get assignment or assign a new ID.
func (eit *echoIDTracker) getOrAssign(key flow3Tuple) (id uint16, success bool) {
eit.lock.Lock()
defer eit.lock.Unlock()
id, exists := eit.mapping[key]
if exists {
return id, true
}
if eit.nextAssignment == math.MaxUint16 {
eit.nextAssignment = 0
}
for i, assigned := range eit.assignment[eit.nextAssignment:] {
if !assigned {
echoID := uint16(i) + eit.nextAssignment
eit.set(key, echoID)
return echoID, true
}
}
for i, assigned := range eit.assignment[0:eit.nextAssignment] {
if !assigned {
echoID := uint16(i)
eit.set(key, echoID)
return echoID, true
}
}
return 0, false
}
// Caller should hold the lock
func (eit *echoIDTracker) set(key flow3Tuple, assignedEchoID uint16) {
eit.assignment[assignedEchoID] = true
eit.mapping[key] = assignedEchoID
eit.nextAssignment = assignedEchoID + 1
}
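// release frees the echo ID assigned to this key so it can be reused. It returns false if the
// key is not currently tracked or if the caller passes a stale echo ID.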
func (eit *echoIDTracker) release(key flow3Tuple, assigned uint16) bool {
eit.lock.Lock()
defer eit.lock.Unlock()
currentEchoID, exists := eit.mapping[key]
if exists && assigned == currentEchoID {
delete(eit.mapping, key)
eit.assignment[assigned] = false
return true
}
return false
}
type echoFunnelID uint16
func (snf echoFunnelID) Type() string {
return "echoID"
}
func (snf echoFunnelID) String() string {
return strconv.FormatUint(uint64(snf), 10)
}
func newICMPProxy(listenIP netip.Addr, zone string, logger *zerolog.Logger, idleTimeout time.Duration) (*icmpProxy, error) {
conn, err := newICMPConn(listenIP, zone)
if err != nil {
return nil, err
}
logger.Info().Msgf("Created ICMP proxy listening on %s", conn.LocalAddr())
return &icmpProxy{
srcFunnelTracker: packet.NewFunnelTracker(),
echoIDTracker: newEchoIDTracker(),
encoder: packet.NewEncoder(),
conn: conn,
logger: logger,
idleTimeout: idleTimeout,
}, nil
}
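// Request maps the eyeball's (source IP, destination IP, original echo ID) to a unique local
// echo ID, registers a funnel for the flow if needed, and forwards the rewritten echo request
// to the destination over the shared ICMP socket.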
func (ip *icmpProxy) Request(pk *packet.ICMP, responder packet.FunnelUniPipe) error {
if pk == nil {
return errPacketNil
}
originalEcho, err := getICMPEcho(pk.Message)
if err != nil {
return err
}
echoIDTrackerKey := flow3Tuple{
srcIP: pk.Src,
dstIP: pk.Dst,
originalEchoID: originalEcho.ID,
}
// TODO: TUN-6744 assign unique flow per (src, echo ID)
assignedEchoID, success := ip.echoIDTracker.getOrAssign(echoIDTrackerKey)
if !success {
return fmt.Errorf("failed to assign unique echo ID")
}
newFunnelFunc := func() (packet.Funnel, error) {
originalEcho, err := getICMPEcho(pk.Message)
if err != nil {
return nil, err
}
originSender := originSender{
conn: ip.conn,
echoIDTracker: ip.echoIDTracker,
echoIDTrackerKey: echoIDTrackerKey,
assignedEchoID: assignedEchoID,
}
icmpFlow := newICMPEchoFlow(pk.Src, &originSender, responder, int(assignedEchoID), originalEcho.ID, ip.encoder)
return icmpFlow, nil
}
funnelID := echoFunnelID(assignedEchoID)
funnel, isNew, err := ip.srcFunnelTracker.GetOrRegister(funnelID, newFunnelFunc)
if err != nil {
return err
}
if isNew {
ip.logger.Debug().
Str("src", pk.Src.String()).
Str("dst", pk.Dst.String()).
Int("originalEchoID", originalEcho.ID).
Int("assignedEchoID", int(assignedEchoID)).
Msg("New flow")
}
icmpFlow, err := toICMPEchoFlow(funnel)
if err != nil {
return err
}
return icmpFlow.sendToDst(pk.Dst, pk.Message)
}
// Serve listens for responses to the requests until context is done
func (ip *icmpProxy) Serve(ctx context.Context) error {
go func() {
<-ctx.Done()
ip.conn.Close()
}()
go func() {
ip.srcFunnelTracker.ScheduleCleanup(ctx, ip.idleTimeout)
}()
buf := make([]byte, mtu)
icmpDecoder := packet.NewICMPDecoder()
for {
n, from, err := ip.conn.ReadFrom(buf)
if err != nil {
return err
}
reply, err := parseReply(from, buf[:n])
if err != nil {
ip.logger.Debug().Err(err).Str("dst", from.String()).Msg("Failed to parse ICMP reply, continue to parse as full packet")
// In unit tests, we found that when the listener listens on 0.0.0.0, the socket reads the full packet after
// the second reply
if err := ip.handleFullPacket(icmpDecoder, buf[:n]); err != nil {
ip.logger.Debug().Err(err).Str("dst", from.String()).Msg("Failed to parse ICMP reply as full packet")
}
continue
}
if !isEchoReply(reply.msg) {
ip.logger.Debug().Str("dst", from.String()).Msgf("Drop ICMP %s from reply", reply.msg.Type)
continue
}
if err := ip.sendReply(reply); err != nil {
ip.logger.Error().Err(err).Str("dst", from.String()).Msg("Failed to send ICMP reply")
continue
}
}
}
func (ip *icmpProxy) handleFullPacket(decoder *packet.ICMPDecoder, rawPacket []byte) error {
icmpPacket, err := decoder.Decode(packet.RawPacket{Data: rawPacket})
if err != nil {
return err
}
echo, err := getICMPEcho(icmpPacket.Message)
if err != nil {
return err
}
reply := echoReply{
from: icmpPacket.Src,
msg: icmpPacket.Message,
echo: echo,
}
if err := ip.sendReply(&reply); err != nil {
return err
}
return nil
}
func (ip *icmpProxy) sendReply(reply *echoReply) error {
funnelID := echoFunnelID(reply.echo.ID)
funnel, ok := ip.srcFunnelTracker.Get(funnelID)
if !ok {
return packet.ErrFunnelNotFound
}
icmpFlow, err := toICMPEchoFlow(funnel)
if err != nil {
return err
}
return icmpFlow.returnToSrc(reply)
}
// originSender wraps icmp.PacketConn to implement packet.FunnelUniPipe interface
type originSender struct {
conn *icmp.PacketConn
echoIDTracker *echoIDTracker
echoIDTrackerKey flow3Tuple
assignedEchoID uint16
}
func (os *originSender) SendPacket(dst netip.Addr, pk packet.RawPacket) error {
_, err := os.conn.WriteTo(pk.Data, &net.UDPAddr{
IP: dst.AsSlice(),
})
return err
}
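// Close releases the echo ID assigned to this flow. The shared ICMP socket stays open because
// it is owned by the proxy, unlike the per-flow sockets used on Linux.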
func (os *originSender) Close() error {
os.echoIDTracker.release(os.echoIDTrackerKey, os.assignedEchoID)
return nil
}

ingress/icmp_darwin_test.go Normal file

@ -0,0 +1,121 @@
//go:build darwin
package ingress
import (
"math"
"net/netip"
"testing"
"github.com/stretchr/testify/require"
)
func TestSingleEchoIDTracker(t *testing.T) {
tracker := newEchoIDTracker()
key := flow3Tuple{
srcIP: netip.MustParseAddr("172.16.0.1"),
dstIP: netip.MustParseAddr("172.16.0.2"),
originalEchoID: 5182,
}
// not assigned yet, so nothing to release
require.False(t, tracker.release(key, 0))
echoID, ok := tracker.getOrAssign(key)
require.True(t, ok)
require.Equal(t, uint16(0), echoID)
// Second time should return the same echo ID
echoID, ok = tracker.getOrAssign(key)
require.True(t, ok)
require.Equal(t, uint16(0), echoID)
// releasing a different ID returns false
require.False(t, tracker.release(key, 1999))
require.True(t, tracker.release(key, echoID))
// releasing the second time returns false
require.False(t, tracker.release(key, echoID))
// After release, the same key is assigned the next echo ID
echoID, ok = tracker.getOrAssign(key)
require.True(t, ok)
require.Equal(t, uint16(1), echoID)
}
func TestFullEchoIDTracker(t *testing.T) {
var (
dstIP = netip.MustParseAddr("192.168.0.1")
originalEchoID = 41820
)
tracker := newEchoIDTracker()
firstSrcIP := netip.MustParseAddr("172.16.0.1")
srcIP := firstSrcIP
for i := uint16(0); i < math.MaxUint16; i++ {
key := flow3Tuple{
srcIP: srcIP,
dstIP: dstIP,
originalEchoID: originalEchoID,
}
echoID, ok := tracker.getOrAssign(key)
require.True(t, ok)
require.Equal(t, i, echoID)
echoID, ok = tracker.get(key)
require.True(t, ok)
require.Equal(t, i, echoID)
srcIP = srcIP.Next()
}
key := flow3Tuple{
srcIP: srcIP.Next(),
dstIP: dstIP,
originalEchoID: originalEchoID,
}
// All echo IDs are assigned
echoID, ok := tracker.getOrAssign(key)
require.False(t, ok)
require.Equal(t, uint16(0), echoID)
srcIP = firstSrcIP
for i := uint16(0); i < math.MaxUint16; i++ {
key := flow3Tuple{
srcIP: srcIP,
dstIP: dstIP,
originalEchoID: originalEchoID,
}
ok := tracker.release(key, i)
require.True(t, ok)
echoID, ok = tracker.get(key)
require.False(t, ok)
require.Equal(t, uint16(0), echoID)
srcIP = srcIP.Next()
}
// The IDs are assignable again
srcIP = firstSrcIP
for i := uint16(0); i < math.MaxUint16; i++ {
key := flow3Tuple{
srcIP: srcIP,
dstIP: dstIP,
originalEchoID: originalEchoID,
}
echoID, ok := tracker.getOrAssign(key)
require.True(t, ok)
require.Equal(t, i, echoID)
echoID, ok = tracker.get(key)
require.True(t, ok)
require.Equal(t, i, echoID)
srcIP = srcIP.Next()
}
}
func (eit *echoIDTracker) get(key flow3Tuple) (id uint16, exist bool) {
eit.lock.Lock()
defer eit.lock.Unlock()
id, exists := eit.mapping[key]
return id, exists
}

ingress/icmp_generic.go Normal file

@ -0,0 +1,31 @@
//go:build !darwin && !linux && (!windows || !cgo)
package ingress
import (
"context"
"fmt"
"net/netip"
"runtime"
"time"
"github.com/rs/zerolog"
"github.com/cloudflare/cloudflared/packet"
)
var errICMPProxyNotImplemented = fmt.Errorf("ICMP proxy is not implemented on %s %s", runtime.GOOS, runtime.GOARCH)
type icmpProxy struct{}
func (ip icmpProxy) Request(pk *packet.ICMP, responder packet.FunnelUniPipe) error {
return errICMPProxyNotImplemented
}
func (ip *icmpProxy) Serve(ctx context.Context) error {
return errICMPProxyNotImplemented
}
func newICMPProxy(listenIP netip.Addr, zone string, logger *zerolog.Logger, idleTimeout time.Duration) (*icmpProxy, error) {
return nil, errICMPProxyNotImplemented
}

ingress/icmp_linux.go Normal file

@ -0,0 +1,213 @@
//go:build linux
package ingress
// This file implements ICMPProxy for Linux. Each (source IP, destination IP, original echo ID) tuple opens a
// non-privileged ICMP socket. The source IP of the requests is rewritten to the bind IP of the socket and the echo ID
// is rewritten to the port number of the socket. The kernel ensures the socket only reads replies whose echo ID matches the port number.
// For more information about the socket, see https://man7.org/linux/man-pages/man7/icmp.7.html and https://lwn.net/Articles/422330/
import (
"context"
"fmt"
"net"
"net/netip"
"os"
"regexp"
"strconv"
"time"
"github.com/pkg/errors"
"github.com/rs/zerolog"
"golang.org/x/net/icmp"
"github.com/cloudflare/cloudflared/packet"
)
const (
// https://lwn.net/Articles/550551/ IPv4 and IPv6 share the same path
pingGroupPath = "/proc/sys/net/ipv4/ping_group_range"
)
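// To widen the range so that an unprivileged user can open these sockets, an administrator can,
// for example, run: sysctl -w net.ipv4.ping_group_range="0 2147483647".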
var (
findGroupIDRegex = regexp.MustCompile(`\d+`)
)
type icmpProxy struct {
srcFunnelTracker *packet.FunnelTracker
listenIP netip.Addr
ipv6Zone string
logger *zerolog.Logger
idleTimeout time.Duration
}
func newICMPProxy(listenIP netip.Addr, zone string, logger *zerolog.Logger, idleTimeout time.Duration) (*icmpProxy, error) {
if err := testPermission(listenIP, zone, logger); err != nil {
return nil, err
}
return &icmpProxy{
srcFunnelTracker: packet.NewFunnelTracker(),
listenIP: listenIP,
ipv6Zone: zone,
logger: logger,
idleTimeout: idleTimeout,
}, nil
}
func testPermission(listenIP netip.Addr, zone string, logger *zerolog.Logger) error {
// Opens a non-privileged ICMP socket. On Linux the group ID of the process needs to be in ping_group_range
// Only check ping_group_range once for IPv4
if listenIP.Is4() {
if err := checkInPingGroup(); err != nil {
logger.Warn().Err(err).Msgf("The user running cloudflared process has a GID (group ID) that is not within ping_group_range. You might need to add that user to a group within that range, or instead update the range to encompass a group the user is already in by modifying %s. Otherwise cloudflared will not be able to ping this network", pingGroupPath)
return err
}
}
conn, err := newICMPConn(listenIP, zone)
if err != nil {
return err
}
// This conn is only to test if cloudflared has permission to open this type of socket
conn.Close()
return nil
}
func checkInPingGroup() error {
file, err := os.ReadFile(pingGroupPath)
if err != nil {
return err
}
groupID := os.Getgid()
// Example content: 999 59999
found := findGroupIDRegex.FindAll(file, 2)
if len(found) == 2 {
groupMin, err := strconv.ParseInt(string(found[0]), 10, 32)
if err != nil {
return errors.Wrapf(err, "failed to determine minimum ping group ID")
}
groupMax, err := strconv.ParseInt(string(found[1]), 10, 32)
if err != nil {
return errors.Wrapf(err, "failed to determine minimum ping group ID")
}
if groupID < int(groupMin) || groupID > int(groupMax) {
return fmt.Errorf("Group ID %d is not between ping group %d to %d", groupID, groupMin, groupMax)
}
return nil
}
return fmt.Errorf("did not find group range in %s", pingGroupPath)
}
func (ip *icmpProxy) Request(pk *packet.ICMP, responder packet.FunnelUniPipe) error {
if pk == nil {
return errPacketNil
}
originalEcho, err := getICMPEcho(pk.Message)
if err != nil {
return err
}
newConnChan := make(chan *icmp.PacketConn, 1)
newFunnelFunc := func() (packet.Funnel, error) {
conn, err := newICMPConn(ip.listenIP, ip.ipv6Zone)
if err != nil {
return nil, errors.Wrap(err, "failed to open ICMP socket")
}
ip.logger.Debug().Msgf("Opened ICMP socket listen on %s", conn.LocalAddr())
newConnChan <- conn
localUDPAddr, ok := conn.LocalAddr().(*net.UDPAddr)
if !ok {
return nil, fmt.Errorf("ICMP listener address %s is not net.UDPAddr", conn.LocalAddr())
}
originSender := originSender{conn: conn}
echoID := localUDPAddr.Port
icmpFlow := newICMPEchoFlow(pk.Src, &originSender, responder, echoID, originalEcho.ID, packet.NewEncoder())
return icmpFlow, nil
}
funnelID := flow3Tuple{
srcIP: pk.Src,
dstIP: pk.Dst,
originalEchoID: originalEcho.ID,
}
funnel, isNew, err := ip.srcFunnelTracker.GetOrRegister(funnelID, newFunnelFunc)
if err != nil {
return err
}
icmpFlow, err := toICMPEchoFlow(funnel)
if err != nil {
return err
}
if isNew {
ip.logger.Debug().
Str("src", pk.Src.String()).
Str("dst", pk.Dst.String()).
Int("originalEchoID", originalEcho.ID).
Msg("New flow")
conn := <-newConnChan
go func() {
defer ip.srcFunnelTracker.Unregister(funnelID, icmpFlow)
if err := ip.listenResponse(icmpFlow, conn); err != nil {
ip.logger.Debug().Err(err).
Str("src", pk.Src.String()).
Str("dst", pk.Dst.String()).
Int("originalEchoID", originalEcho.ID).
Msg("Failed to listen for ICMP echo response")
}
}()
}
if err := icmpFlow.sendToDst(pk.Dst, pk.Message); err != nil {
return errors.Wrap(err, "failed to send ICMP echo request")
}
return nil
}
func (ip *icmpProxy) Serve(ctx context.Context) error {
ip.srcFunnelTracker.ScheduleCleanup(ctx, ip.idleTimeout)
return ctx.Err()
}
func (ip *icmpProxy) listenResponse(flow *icmpEchoFlow, conn *icmp.PacketConn) error {
buf := make([]byte, mtu)
for {
n, from, err := conn.ReadFrom(buf)
if err != nil {
return err
}
reply, err := parseReply(from, buf[:n])
if err != nil {
ip.logger.Error().Err(err).Str("dst", from.String()).Msg("Failed to parse ICMP reply")
continue
}
if !isEchoReply(reply.msg) {
ip.logger.Debug().Str("dst", from.String()).Msgf("Drop ICMP %s from reply", reply.msg.Type)
continue
}
if err := flow.returnToSrc(reply); err != nil {
ip.logger.Err(err).Str("dst", from.String()).Msg("Failed to send ICMP reply")
continue
}
}
}
// originSender wraps icmp.PacketConn to implement packet.FunnelUniPipe interface
type originSender struct {
conn *icmp.PacketConn
}
func (os *originSender) SendPacket(dst netip.Addr, pk packet.RawPacket) error {
_, err := os.conn.WriteTo(pk.Data, &net.UDPAddr{
IP: dst.AsSlice(),
})
return err
}
func (os *originSender) Close() error {
return os.conn.Close()
}
// Only linux uses flow3Tuple as FunnelID
func (ft flow3Tuple) Type() string {
return "srcIP_dstIP_echoID"
}
func (ft flow3Tuple) String() string {
return fmt.Sprintf("%s:%s:%d", ft.srcIP, ft.dstIP, ft.originalEchoID)
}

ingress/icmp_posix.go Normal file

@ -0,0 +1,142 @@
//go:build darwin || linux
package ingress
// This file extracts logic shared by the Linux and Darwin implementations of ICMPProxy.
import (
"fmt"
"net"
"net/netip"
"github.com/google/gopacket/layers"
"golang.org/x/net/icmp"
"github.com/cloudflare/cloudflared/packet"
)
// Opens a non-privileged ICMP socket on Linux and Darwin
func newICMPConn(listenIP netip.Addr, zone string) (*icmp.PacketConn, error) {
if listenIP.Is4() {
return icmp.ListenPacket("udp4", listenIP.String())
}
listenAddr := listenIP.String()
if zone != "" {
listenAddr = listenAddr + "%" + zone
}
return icmp.ListenPacket("udp6", listenAddr)
}
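// Illustrative sketch (not part of this diff): a minimal IPv4 echo round-trip over the
// non-privileged socket returned by newICMPConn. Assumes golang.org/x/net/ipv4 is also imported
// for the message type; mtu and parseReply come from this package.
func exampleEchoRoundTrip(listenIP, dst netip.Addr) error {
	conn, err := newICMPConn(listenIP, "")
	if err != nil {
		return err
	}
	defer conn.Close()

	echo := icmp.Message{
		Type: ipv4.ICMPTypeEcho,
		Body: &icmp.Echo{ID: 1234, Seq: 1, Data: []byte("ping")},
	}
	raw, err := echo.Marshal(nil)
	if err != nil {
		return err
	}
	if _, err := conn.WriteTo(raw, &net.UDPAddr{IP: dst.AsSlice()}); err != nil {
		return err
	}

	buf := make([]byte, mtu)
	n, from, err := conn.ReadFrom(buf)
	if err != nil {
		return err
	}
	reply, err := parseReply(from, buf[:n])
	if err != nil {
		return err
	}
	fmt.Printf("echo reply from %s: id=%d seq=%d\n", reply.from, reply.echo.ID, reply.echo.Seq)
	return nil
}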
func netipAddr(addr net.Addr) (netip.Addr, bool) {
udpAddr, ok := addr.(*net.UDPAddr)
if !ok {
return netip.Addr{}, false
}
return netip.AddrFromSlice(udpAddr.IP)
}
type flow3Tuple struct {
srcIP netip.Addr
dstIP netip.Addr
originalEchoID int
}
// icmpEchoFlow implements the packet.Funnel interface.
type icmpEchoFlow struct {
*packet.RawPacketFunnel
assignedEchoID int
originalEchoID int
// it's up to the user to ensure respEncoder is not used concurrently
respEncoder *packet.Encoder
}
func newICMPEchoFlow(src netip.Addr, sendPipe, returnPipe packet.FunnelUniPipe, assignedEchoID, originalEchoID int, respEncoder *packet.Encoder) *icmpEchoFlow {
return &icmpEchoFlow{
RawPacketFunnel: packet.NewRawPacketFunnel(src, sendPipe, returnPipe),
assignedEchoID: assignedEchoID,
originalEchoID: originalEchoID,
respEncoder: respEncoder,
}
}
// sendToDst rewrites the echo ID to the one assigned to this flow
func (ief *icmpEchoFlow) sendToDst(dst netip.Addr, msg *icmp.Message) error {
originalEcho, err := getICMPEcho(msg)
if err != nil {
return err
}
sendMsg := icmp.Message{
Type: msg.Type,
Code: msg.Code,
Body: &icmp.Echo{
ID: ief.assignedEchoID,
Seq: originalEcho.Seq,
Data: originalEcho.Data,
},
}
// For IPv4, the pseudoHeader is not used because the checksum is always calculated
var pseudoHeader []byte = nil
serializedPacket, err := sendMsg.Marshal(pseudoHeader)
if err != nil {
return err
}
return ief.SendToDst(dst, packet.RawPacket{Data: serializedPacket})
}
// returnToSrc rewrites the echo ID to the original echo ID from the eyeball
func (ief *icmpEchoFlow) returnToSrc(reply *echoReply) error {
reply.echo.ID = ief.originalEchoID
reply.msg.Body = reply.echo
pk := packet.ICMP{
IP: &packet.IP{
Src: reply.from,
Dst: ief.Src,
Protocol: layers.IPProtocol(reply.msg.Type.Protocol()),
TTL: packet.DefaultTTL,
},
Message: reply.msg,
}
serializedPacket, err := ief.respEncoder.Encode(&pk)
if err != nil {
return err
}
return ief.ReturnToSrc(serializedPacket)
}
type echoReply struct {
from netip.Addr
msg *icmp.Message
echo *icmp.Echo
}
func parseReply(from net.Addr, rawMsg []byte) (*echoReply, error) {
fromAddr, ok := netipAddr(from)
if !ok {
return nil, fmt.Errorf("cannot convert %s to netip.Addr", from)
}
proto := layers.IPProtocolICMPv4
if fromAddr.Is6() {
proto = layers.IPProtocolICMPv6
}
msg, err := icmp.ParseMessage(int(proto), rawMsg)
if err != nil {
return nil, err
}
echo, err := getICMPEcho(msg)
if err != nil {
return nil, err
}
return &echoReply{
from: fromAddr,
msg: msg,
echo: echo,
}, nil
}
func toICMPEchoFlow(funnel packet.Funnel) (*icmpEchoFlow, error) {
icmpFlow, ok := funnel.(*icmpEchoFlow)
if !ok {
return nil, fmt.Errorf("%v is not *ICMPEchoFunnel", funnel)
}
return icmpFlow, nil
}

Some files were not shown because too many files have changed in this diff.