Compare commits

28 Commits (the author and date columns were empty in the source view; SHA1s only):

02705c44b2, ce27840573, 40dc601e9d, e5578cb74e, bb765e741d, 10081602a4,
236fcf56d6, 73a9980f38, 86e8585563, d8a066628b, 553e77e061, 8f94f54ec7,
2827b2fe8f, 6dc8ed710e, e0b1ac0d05, e7c5eb54af, cfec602fa7, 6fceb94998,
cf817f7036, c8724a290a, e7586153be, 11777db304, 3f6b1f24d0, a4105e8708,
6496322bee, 906452a9c9, d969fdec3e, 7336a1a4d6
@@ -0,0 +1,131 @@
+stages: [build, release]
+
+default:
+  id_tokens:
+    VAULT_ID_TOKEN:
+      aud: https://vault.cfdata.org
+
+# This before_script is injected into every job that runs on master meaning that if there is no tag the step
+# will succeed but only write "No tag present - Skipping" to the console.
+.check_tag:
+  before_script:
+    - |
+      # Check if there is a Git tag pointing to HEAD
+      echo "Tag found: $(git tag --points-at HEAD | grep .)"
+      if git tag --points-at HEAD | grep .; then
+        echo "Tag found: $(git tag --points-at HEAD | grep .)"
+        export "VERSION=$(git tag --points-at HEAD | grep .)"
+      else
+        echo "No tag present — skipping."
+        exit 0
+      fi
+
+## A set of predefined rules to use on the different jobs
+.default_rules:
+  # Rules to run the job only on the master branch
+  run_on_master:
+    - if: $CI_COMMIT_BRANCH == $CI_DEFAULT_BRANCH
+      when: always
+    - when: never
+  # Rules to run the job only on branches that are not master. This is needed because for now
+  # we need to keep a similar behavior due to the integration with teamcity, which requires us
+  # to not trigger pipelines on tags and/or merge requests.
+  run_on_branch:
+    - if: $CI_COMMIT_TAG
+      when: never
+    - if: $CI_PIPELINE_SOURCE != "merge_request_event" && $CI_COMMIT_BRANCH != $CI_DEFAULT_BRANCH
+      when: always
+    - when: never
+
+# -----------------------------------------------
+# Stage 1: Build on every PR
+# -----------------------------------------------
+build_cloudflared_macos: &build
+  stage: build
+  rules:
+    - !reference [.default_rules, run_on_branch]
+  tags:
+    - "macstadium-${RUNNER_ARCH}"
+  parallel:
+    matrix:
+      - RUNNER_ARCH: [arm, intel]
+  artifacts:
+    paths:
+      - artifacts/*
+  script:
+    - '[ "${RUNNER_ARCH}" = "arm" ] && export TARGET_ARCH=arm64'
+    - '[ "${RUNNER_ARCH}" = "intel" ] && export TARGET_ARCH=amd64'
+    - ARCH=$(uname -m)
+    - echo ARCH=$ARCH - TARGET_ARCH=$TARGET_ARCH
+    - ./.teamcity/mac/install-cloudflare-go.sh
+    - export PATH="/tmp/go/bin:$PATH"
+    - BUILD_SCRIPT=.teamcity/mac/build.sh
+    - if [[ ! -x ${BUILD_SCRIPT} ]] ; then exit ; fi
+    - set -euo pipefail
+    - echo "Executing ${BUILD_SCRIPT}"
+    - exec ${BUILD_SCRIPT}
+
+# -----------------------------------------------
+# Stage 1: Build and sign only on releases
+# -----------------------------------------------
+build_and_sign_cloudflared_macos:
+  <<: *build
+  rules:
+    - !reference [.default_rules, run_on_master]
+  secrets:
+    APPLE_DEV_CA_CERT:
+      vault: gitlab/cloudflare/tun/cloudflared/_branch/master/apple_dev_ca_cert_v2/data@kv
+      file: false
+    CFD_CODE_SIGN_CERT:
+      vault: gitlab/cloudflare/tun/cloudflared/_branch/master/cfd_code_sign_cert_v2/data@kv
+      file: false
+    CFD_CODE_SIGN_KEY:
+      vault: gitlab/cloudflare/tun/cloudflared/_branch/master/cfd_code_sign_key_v2/data@kv
+      file: false
+    CFD_CODE_SIGN_PASS:
+      vault: gitlab/cloudflare/tun/cloudflared/_branch/master/cfd_code_sign_pass_v2/data@kv
+      file: false
+    CFD_INSTALLER_CERT:
+      vault: gitlab/cloudflare/tun/cloudflared/_branch/master/cfd_installer_cert_v2/data@kv
+      file: false
+    CFD_INSTALLER_KEY:
+      vault: gitlab/cloudflare/tun/cloudflared/_branch/master/cfd_installer_key_v2/data@kv
+      file: false
+    CFD_INSTALLER_PASS:
+      vault: gitlab/cloudflare/tun/cloudflared/_branch/master/cfd_installer_pass_v2/data@kv
+      file: false
+
+# -----------------------------------------------
+# Stage 2: Release to Github after building and signing
+# -----------------------------------------------
+release_cloudflared_macos_to_github:
+  stage: release
+  image: docker-registry.cfdata.org/stash/tun/docker-images/cloudflared-ci/main:6-8616fe631b76-amd64@sha256:96f4fd05e66cec03e0864c1bcf09324c130d4728eef45ee994716da499183614
+  extends: .check_tag
+  dependencies:
+    - build_and_sign_cloudflared_macos
+  rules:
+    - !reference [.default_rules, run_on_master]
+  cache:
+    paths:
+      - .cache/pip
+  variables:
+    PIP_CACHE_DIR: "$CI_PROJECT_DIR/.cache/pip"
+    KV_NAMESPACE: 380e19aa04314648949b6ad841417ebe
+    KV_ACCOUNT: 5ab4e9dfbd435d24068829fda0077963
+  secrets:
+    KV_API_TOKEN:
+      vault: gitlab/cloudflare/tun/cloudflared/_dev/cfd_kv_api_token/data@kv
+      file: false
+    API_KEY:
+      vault: gitlab/cloudflare/tun/cloudflared/_dev/cfd_github_api_key/data@kv
+      file: false
+  script:
+    - python3 --version ; pip --version # For debugging
+    - python3 -m venv venv
+    - source venv/bin/activate
+    - pip install pynacl==1.4.0 pygithub==1.55
+    - echo $VERSION
+    - echo $TAG_EXISTS
+    - echo "Running release because tag exists."
+    - make macos-release
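The `.check_tag` anchor above is the release gate for the whole pipeline: `git tag --points-at HEAD` prints any tags pointing at the current commit, and piping through `grep .` turns empty output into a non-zero exit, so the `else` branch runs when the pipeline is on an untagged master commit. Note that if several tags point at HEAD, the command substitution captures all of them. A minimal, locally runnable sketch of the same gate (illustrative, not the CI file itself):

```bash
#!/usr/bin/env bash
set -euo pipefail

# `git tag --points-at HEAD` lists tags on the current commit, one per line.
# `grep -q .` exits 1 on empty input, so the `if` succeeds only when a tag exists.
if git tag --points-at HEAD | grep -q .; then
  VERSION="$(git tag --points-at HEAD | head -n1)"   # sketch: take the first tag
  echo "Tag found: ${VERSION}"
else
  echo "No tag present - skipping."
  exit 0
fi
```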
@@ -49,7 +49,7 @@ import_certificate() {
     echo -n -e ${CERTIFICATE_ENV_VAR} | base64 -D > ${CERTIFICATE_FILE_NAME}
     # we set || true here and for every `security import invoke` because the "duplicate SecKeychainItemImport" error
     # will cause set -e to exit 1. It is okay we do this because we deliberately handle this error in the lines below.
-    local out=$(security import ${CERTIFICATE_FILE_NAME} -A 2>&1) || true
+    local out=$(security import ${CERTIFICATE_FILE_NAME} -T /usr/bin/pkgbuild -A 2>&1) || true
     local exitcode=$?
     # delete the certificate from disk
     rm -rf ${CERTIFICATE_FILE_NAME}
@@ -68,6 +68,28 @@ import_certificate() {
   fi
 }

+create_cloudflared_build_keychain() {
+  # Reusing the private key password as the keychain key
+  local PRIVATE_KEY_PASS=$1
+
+  # Create keychain only if it doesn't already exist
+  if [ ! -f "$HOME/Library/Keychains/cloudflared_build_keychain.keychain-db" ]; then
+    security create-keychain -p "$PRIVATE_KEY_PASS" cloudflared_build_keychain
+  else
+    echo "Keychain already exists: cloudflared_build_keychain"
+  fi
+
+  # Append temp keychain to the user domain
+  security list-keychains -d user -s cloudflared_build_keychain $(security list-keychains -d user | sed s/\"//g)
+
+  # Remove relock timeout
+  security set-keychain-settings cloudflared_build_keychain
+
+  # Unlock keychain so it doesn't require password
+  security unlock-keychain -p "$PRIVATE_KEY_PASS" cloudflared_build_keychain
+
+}
+
 # Imports private keys to the Apple KeyChain
 import_private_keys() {
   local PRIVATE_KEY_NAME=$1
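Doing the signing in a throwaway keychain keeps CI-imported certificates and keys out of the runner's login keychain, and the `security list-keychains -d user -s ...` call is what makes the imported identities discoverable: `codesign` and `pkgbuild` only search keychains that are on the user search list. A quick way to inspect the resulting state on a runner (assumes Apple's `security` CLI; the identity output is illustrative):

```bash
# The build keychain should now appear in the user search list
security list-keychains -d user

# List code-signing identities the signing tools can discover in it
security find-identity -v -p codesigning cloudflared_build_keychain
```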
@@ -83,7 +105,7 @@ import_private_keys() {
     echo -n -e ${PRIVATE_KEY_ENV_VAR} | base64 -D > ${PRIVATE_KEY_FILE_NAME}
     # we set || true here and for every `security import invoke` because the "duplicate SecKeychainItemImport" error
     # will cause set -e to exit 1. It is okay we do this because we deliberately handle this error in the lines below.
-    local out=$(security import ${PRIVATE_KEY_FILE_NAME} -A -P "${PRIVATE_KEY_PASS}" 2>&1) || true
+    local out=$(security import ${PRIVATE_KEY_FILE_NAME} -k cloudflared_build_keychain -P "$PRIVATE_KEY_PASS" -T /usr/bin/pkgbuild -A -P "${PRIVATE_KEY_PASS}" 2>&1) || true
     local exitcode=$?
     rm -rf ${PRIVATE_KEY_FILE_NAME}
     if [ -n "$out" ]; then
@@ -100,6 +122,9 @@ import_private_keys() {
   fi
 }

+# Create temp keychain only for this build
+create_cloudflared_build_keychain "${CFD_CODE_SIGN_PASS}"
+
 # Add Apple Root Developer certificate to the key chain
 import_certificate "Apple Developer CA" "${APPLE_DEV_CA_CERT}" "${APPLE_CA_CERT}"
@@ -119,8 +144,8 @@ import_certificate "Developer ID Installer" "${CFD_INSTALLER_CERT}" "${INSTALLER
 if [[ ! -z "$CFD_CODE_SIGN_NAME" ]]; then
   CODE_SIGN_NAME="${CFD_CODE_SIGN_NAME}"
 else
-  if [[ -n "$(security find-certificate -c "Developer ID Application" | cut -d'"' -f 4 -s | grep "Developer ID Application:" | head -1)" ]]; then
-    CODE_SIGN_NAME=$(security find-certificate -c "Developer ID Application" | cut -d'"' -f 4 -s | grep "Developer ID Application:" | head -1)
+  if [[ -n "$(security find-certificate -c "Developer ID Application" cloudflared_build_keychain | cut -d'"' -f 4 -s | grep "Developer ID Application:" | head -1)" ]]; then
+    CODE_SIGN_NAME=$(security find-certificate -c "Developer ID Application" cloudflared_build_keychain | cut -d'"' -f 4 -s | grep "Developer ID Application:" | head -1)
   else
     CODE_SIGN_NAME=""
   fi
@@ -130,8 +155,8 @@ fi
 if [[ ! -z "$CFD_INSTALLER_NAME" ]]; then
   PKG_SIGN_NAME="${CFD_INSTALLER_NAME}"
 else
-  if [[ -n "$(security find-certificate -c "Developer ID Installer" | cut -d'"' -f 4 -s | grep "Developer ID Installer:" | head -1)" ]]; then
-    PKG_SIGN_NAME=$(security find-certificate -c "Developer ID Installer" | cut -d'"' -f 4 -s | grep "Developer ID Installer:" | head -1)
+  if [[ -n "$(security find-certificate -c "Developer ID Installer" cloudflared_build_keychain | cut -d'"' -f 4 -s | grep "Developer ID Installer:" | head -1)" ]]; then
+    PKG_SIGN_NAME=$(security find-certificate -c "Developer ID Installer" cloudflared_build_keychain | cut -d'"' -f 4 -s | grep "Developer ID Installer:" | head -1)
   else
     PKG_SIGN_NAME=""
   fi
@@ -142,9 +167,16 @@ rm -rf "${TARGET_DIRECTORY}"
 export TARGET_OS="darwin"
 GOCACHE="$PWD/../../../../" GOPATH="$PWD/../../../../" CGO_ENABLED=1 make cloudflared


+# This allows apple tools to use the certificates in the keychain without requiring password input.
+# This command always needs to run after the certificates have been loaded into the keychain
+if [[ ! -z "$CFD_CODE_SIGN_PASS" ]]; then
+  security set-key-partition-list -S apple-tool:,apple: -s -k "${CFD_CODE_SIGN_PASS}" cloudflared_build_keychain
+fi
+
 # sign the cloudflared binary
 if [[ ! -z "$CODE_SIGN_NAME" ]]; then
-  codesign -s "${CODE_SIGN_NAME}" -f -v --timestamp --options runtime ${BINARY_NAME}
+  codesign --keychain $HOME/Library/Keychains/cloudflared_build_keychain.keychain-db -s "${CODE_SIGN_NAME}" -fv --options runtime --timestamp ${BINARY_NAME}

 # notarize the binary
 # TODO: TUN-5789
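The `set-key-partition-list` step is what makes headless signing possible: after a scripted `security import`, macOS would otherwise show a UI prompt the first time `codesign` touches the imported private key, which a CI runner can never answer. Setting the partition list to `apple-tool:,apple:` pre-authorizes Apple's signing tools. Verifying the signed binary afterwards (a sketch; `./cloudflared` stands in for `${BINARY_NAME}`):

```bash
# Verify the signature that codesign just applied
codesign --verify --verbose=2 ./cloudflared

# Confirm the hardened runtime flag requested via --options runtime
# (codesign -d writes its report to stderr, hence the 2>&1)
codesign --display --verbose=2 ./cloudflared 2>&1 | grep -i runtime
```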
@@ -165,11 +197,13 @@ tar czf "$FILENAME" "${BINARY_NAME}"

 # build the installer package
 if [[ ! -z "$PKG_SIGN_NAME" ]]; then

   pkgbuild --identifier com.cloudflare.${PRODUCT} \
       --version ${VERSION} \
       --scripts ${ARCH_TARGET_DIRECTORY}/scripts \
       --root ${ARCH_TARGET_DIRECTORY}/contents \
       --install-location /usr/local/bin \
+      --keychain cloudflared_build_keychain \
       --sign "${PKG_SIGN_NAME}" \
       ${PKGNAME}
@@ -187,3 +221,8 @@ fi
 # cleanup build directory because this script is not ran within containers,
 # which might lead to future issues in subsequent runs.
 rm -rf "${TARGET_DIRECTORY}"
+
+# cleanup the keychain
+security default-keychain -d user -s login.keychain-db
+security list-keychains -d user -s login.keychain-db
+security delete-keychain cloudflared_build_keychain
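The cleanup matters because the MacStadium runners are persistent machines rather than disposable containers (as the comment in this hunk notes): the script resets the default keychain and the search list back to `login.keychain-db` before deleting the build keychain, so a later job never starts with a dangling keychain reference. Checking that the runner is back to a clean state (a sketch):

```bash
security default-keychain -d user   # expect .../login.keychain-db
security list-keychains -d user     # cloudflared_build_keychain should be gone
```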
@@ -22,7 +22,7 @@ RUN .teamcity/install-cloudflare-go.sh
 RUN PATH="/tmp/go/bin:$PATH" make cloudflared

 # use a distroless base image with glibc
-FROM gcr.io/distroless/base-debian11:nonroot
+FROM gcr.io/distroless/base-debian12:nonroot

 LABEL org.opencontainers.image.source="https://github.com/cloudflare/cloudflared"
@@ -17,7 +17,7 @@ RUN .teamcity/install-cloudflare-go.sh
 RUN GOOS=linux GOARCH=amd64 PATH="/tmp/go/bin:$PATH" make cloudflared

 # use a distroless base image with glibc
-FROM gcr.io/distroless/base-debian11:nonroot
+FROM gcr.io/distroless/base-debian12:nonroot

 LABEL org.opencontainers.image.source="https://github.com/cloudflare/cloudflared"
@@ -17,7 +17,7 @@ RUN .teamcity/install-cloudflare-go.sh
 RUN GOOS=linux GOARCH=arm64 PATH="/tmp/go/bin:$PATH" make cloudflared

 # use a distroless base image with glibc
-FROM gcr.io/distroless/base-debian11:nonroot-arm64
+FROM gcr.io/distroless/base-debian12:nonroot-arm64

 LABEL org.opencontainers.image.source="https://github.com/cloudflare/cloudflared"
Makefile (6 changed lines)

@@ -24,7 +24,7 @@ else
 DEB_PACKAGE_NAME := $(BINARY_NAME)
 endif

-DATE := $(shell date -u '+%Y-%m-%d-%H%M UTC')
+DATE := $(shell date -u -r RELEASE_NOTES '+%Y-%m-%d-%H%M UTC')
 VERSION_FLAGS := -X "main.Version=$(VERSION)" -X "main.BuildTime=$(DATE)"
 ifdef PACKAGE_MANAGER
 VERSION_FLAGS := $(VERSION_FLAGS) -X "github.com/cloudflare/cloudflared/cmd/cloudflared/updater.BuiltForPackageManager=$(PACKAGE_MANAGER)"
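With `-r RELEASE_NOTES`, `date` prints the modification time of the RELEASE_NOTES file instead of the current wall clock (this is the "Use RELEASE_NOTES date instead of build date" change from the release notes), so the `main.BuildTime` baked into the binary stays identical across rebuilds of the same release. Comparing the two forms:

```bash
date -u '+%Y-%m-%d-%H%M UTC'                   # wall clock: differs on every build
date -u -r RELEASE_NOTES '+%Y-%m-%d-%H%M UTC'  # file mtime: reproducible per release
```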
@@ -237,6 +237,10 @@ github-release:
 	python3 github_release.py --path $(PWD)/built_artifacts --release-version $(VERSION)
 	python3 github_message.py --release-version $(VERSION)

+.PHONY: macos-release
+macos-release:
+	python3 github_release.py --path $(PWD)/artifacts/ --release-version $(VERSION)
+
 .PHONY: r2-linux-release
 r2-linux-release:
 	python3 ./release_pkgs.py
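The new `macos-release` target is the counterpart of the `release_cloudflared_macos_to_github` job above: the job's `make macos-release` runs after `.check_tag` has exported `VERSION`, and `artifacts/` matches the build job's `artifacts: paths`. Run by hand it would look like this (the version value is illustrative):

```bash
# VERSION is normally exported by the .check_tag before_script in CI;
# a command-line assignment overrides the Makefile's own VERSION.
make macos-release VERSION=2025.4.2
```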
@@ -40,7 +40,7 @@ User documentation for Cloudflare Tunnel can be found at https://developers.clou

 Once installed, you can authenticate `cloudflared` into your Cloudflare account and begin creating Tunnels to serve traffic to your origins.

-* Create a Tunnel with [these instructions](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/create-tunnel)
+* Create a Tunnel with [these instructions](https://developers.cloudflare.com/cloudflare-one/connections/connect-networks/get-started/)
 * Route traffic to that Tunnel:
   * Via public [DNS records in Cloudflare](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/routing-to-tunnel/dns)
   * Or via a public hostname guided by a [Cloudflare Load Balancer](https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/routing-to-tunnel/lb)
@@ -1,3 +1,30 @@
+2025.4.2
+- 2025-04-30 chore: Do not use gitlab merge request pipelines
+- 2025-04-30 DEVTOOLS-16383: Create GitlabCI pipeline to release Mac builds
+- 2025-04-24 TUN-9255: Improve flush on write conditions in http2 tunnel type to match what is done on the edge
+- 2025-04-10 SDLC-3727 - Adding FIPS status to backstage
+
+2025.4.0
+- 2025-04-02 Fix broken links in `cmd/cloudflared/*.go` related to running tunnel as a service
+- 2025-04-02 chore: remove repetitive words
+- 2025-04-01 Fix messages to point to one.dash.cloudflare.com
+- 2025-04-01 feat: emit explicit errors for the `service` command on unsupported OSes
+- 2025-04-01 Use RELEASE_NOTES date instead of build date
+- 2025-04-01 chore: Update tunnel configuration link in the readme
+- 2025-04-01 fix: expand home directory for credentials file
+- 2025-04-01 fix: Use path and filepath operation appropriately
+- 2025-04-01 feat: Adds a new command line for tunnel run for token file
+- 2025-04-01 chore: fix linter rules
+- 2025-03-17 TUN-9101: Don't ignore errors on `cloudflared access ssh`
+- 2025-03-06 TUN-9089: Pin go import to v0.30.0, v0.31.0 requires go 1.23
+
+2025.2.1
+- 2025-02-26 TUN-9016: update base-debian to v12
+- 2025-02-25 TUN-8960: Connect to FED API GW based on the OriginCert's endpoint
+- 2025-02-25 TUN-9007: modify logic to resolve region when the tunnel token has an endpoint field
+- 2025-02-13 SDLC-3762: Remove backstage.io/source-location from catalog-info.yaml
+- 2025-02-06 TUN-8914: Create a flags module to group all cloudflared cli flags
+
 2025.2.0
 - 2025-02-03 TUN-8914: Add a new configuration to locally override the max-active-flows
 - 2025-02-03 Bump x/crypto to 0.31.0
@@ -4,7 +4,6 @@ metadata:
   name: cloudflared
   description: Client for Cloudflare Tunnels
   annotations:
-    backstage.io/source-location: url:https://bitbucket.cfdata.org/projects/TUN/repos/cloudflared/browse
     cloudflare.com/software-excellence-opt-in: "true"
     cloudflare.com/jira-project-key: "TUN"
     cloudflare.com/jira-project-component: "Cloudflare Tunnel"
|
||||||
type: "service"
|
type: "service"
|
||||||
lifecycle: "Active"
|
lifecycle: "Active"
|
||||||
owner: "teams/tunnel-teams-routing"
|
owner: "teams/tunnel-teams-routing"
|
||||||
|
cf:
|
||||||
|
FIPS: "required"
|
||||||
|
|
|
@@ -16,7 +16,7 @@ bullseye: &bullseye
     - golangci-lint
   pre-cache: &build_pre_cache
     - export GOCACHE=/cfsetup_build/.cache/go-build
-    - go install golang.org/x/tools/cmd/goimports@latest
+    - go install golang.org/x/tools/cmd/goimports@v0.30.0
   post-cache:
     # Linting
     - make lint
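Pinning replaces a floating `@latest`, which had begun resolving to x/tools v0.31.0; per the release-notes entry "TUN-9089: Pin go import to v0.30.0, v0.31.0 requires go 1.23", that version needs a newer Go toolchain than the build image provides. The pinned install, plus a typical check of the kind a lint step performs (whether `make lint` invokes goimports exactly this way is an assumption):

```bash
go install golang.org/x/tools/cmd/goimports@v0.30.0
goimports -l .   # list files whose import grouping/formatting would change
```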
@@ -104,7 +104,7 @@ func ssh(c *cli.Context) error {
 	case 3:
 		options.OriginURL = fmt.Sprintf("https://%s:%s", parts[2], parts[1])
 		options.TLSClientConfig = &tls.Config{
-			InsecureSkipVerify: true,
+			InsecureSkipVerify: true, // #nosec G402
 			ServerName:         parts[0],
 		}
 		log.Warn().Msgf("Using insecure SSL connection because SNI overridden to %s", parts[0])
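`InsecureSkipVerify: true` is intentional on this path: the code overrides SNI, pins `ServerName`, and logs a warning right below. The `// #nosec G402` annotation suppresses gosec rule G402 ("TLS InsecureSkipVerify set true") on that single line only. Reproducing the finding locally might look like this (assumes the gosec CLI is installed):

```bash
# Without the #nosec comment, this reports the InsecureSkipVerify assignment.
gosec -include=G402 ./...
```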
@@ -141,6 +141,5 @@ func ssh(c *cli.Context) error {
 		logger := log.With().Str("host", url.Host).Logger()
 		s = stream.NewDebugStream(s, &logger, maxMessages)
 	}
-	carrier.StartClient(wsConn, s, options)
-	return nil
+	return carrier.StartClient(wsConn, s, options)
 }
@@ -19,6 +19,7 @@ import (

 	"github.com/cloudflare/cloudflared/carrier"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
+	cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
 	"github.com/cloudflare/cloudflared/logger"
 	"github.com/cloudflare/cloudflared/sshgen"
 	"github.com/cloudflare/cloudflared/token"
@@ -172,15 +173,15 @@ func Commands() []*cli.Command {
 			EnvVars: []string{"TUNNEL_SERVICE_TOKEN_SECRET"},
 		},
 		&cli.StringFlag{
-			Name:  logger.LogFileFlag,
+			Name:  cfdflags.LogFile,
 			Usage: "Save application log to this file for reporting issues.",
 		},
 		&cli.StringFlag{
-			Name:  logger.LogSSHDirectoryFlag,
+			Name:  cfdflags.LogDirectory,
 			Usage: "Save application log to this directory for reporting issues.",
 		},
 		&cli.StringFlag{
-			Name:    logger.LogSSHLevelFlag,
+			Name:    cfdflags.LogLevelSSH,
 			Aliases: []string{"loglevel"}, //added to match the tunnel side
 			Usage:   "Application logging level {debug, info, warn, error, fatal}. ",
 		},
@@ -342,7 +343,7 @@ func run(cmd string, args ...string) error {
 		return err
 	}
 	go func() {
-		io.Copy(os.Stderr, stderr)
+		_, _ = io.Copy(os.Stderr, stderr)
 	}()

 	stdout, err := c.StdoutPipe()
@@ -350,7 +351,7 @@ func run(cmd string, args ...string) error {
 		return err
 	}
 	go func() {
-		io.Copy(os.Stdout, stdout)
+		_, _ = io.Copy(os.Stdout, stdout)
 	}()
 	return c.Run()
 }
@@ -531,7 +532,7 @@ func isFileThere(candidate string) bool {
 }

 // verifyTokenAtEdge checks for a token on disk, or generates a new one.
-// Then makes a request to to the origin with the token to ensure it is valid.
+// Then makes a request to the origin with the token to ensure it is valid.
 // Returns nil if token is valid.
 func verifyTokenAtEdge(appUrl *url.URL, appInfo *token.AppInfo, c *cli.Context, log *zerolog.Logger) error {
 	headers := parseRequestHeaders(c.StringSlice(sshHeaderFlag))
@@ -4,7 +4,7 @@ import (
 	"github.com/urfave/cli/v2"
 	"github.com/urfave/cli/v2/altsrc"

-	"github.com/cloudflare/cloudflared/logger"
+	cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
 )

 var (
@@ -15,14 +15,14 @@ var (
 func ConfigureLoggingFlags(shouldHide bool) []cli.Flag {
 	return []cli.Flag{
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    logger.LogLevelFlag,
+			Name:    cfdflags.LogLevel,
 			Value:   "info",
 			Usage:   "Application logging level {debug, info, warn, error, fatal}. " + debugLevelWarning,
 			EnvVars: []string{"TUNNEL_LOGLEVEL"},
 			Hidden:  shouldHide,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    logger.LogTransportLevelFlag,
+			Name:    cfdflags.TransportLogLevel,
 			Aliases: []string{"proto-loglevel"}, // This flag used to be called proto-loglevel
 			Value:   "info",
 			Usage:   "Transport logging level(previously called protocol logging level) {debug, info, warn, error, fatal}",
@@ -30,19 +30,19 @@ func ConfigureLoggingFlags(shouldHide bool) []cli.Flag {
 			Hidden: shouldHide,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    logger.LogFileFlag,
+			Name:    cfdflags.LogFile,
 			Usage:   "Save application log to this file for reporting issues.",
 			EnvVars: []string{"TUNNEL_LOGFILE"},
 			Hidden:  shouldHide,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    logger.LogDirectoryFlag,
+			Name:    cfdflags.LogDirectory,
 			Usage:   "Save application log to this directory for reporting issues.",
 			EnvVars: []string{"TUNNEL_LOGDIRECTORY"},
 			Hidden:  shouldHide,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    "trace-output",
+			Name:    cfdflags.TraceOutput,
 			Usage:   "Name of trace output file, generated when cloudflared stops.",
 			EnvVars: []string{"TUNNEL_TRACE_OUTPUT"},
 			Hidden:  shouldHide,
@@ -0,0 +1,155 @@
+package flags
+
+const (
+	// HaConnections specifies how many connections to make to the edge
+	HaConnections = "ha-connections"
+
+	// SshPort is the port on localhost the cloudflared ssh server will run on
+	SshPort = "local-ssh-port"
+
+	// SshIdleTimeout defines the duration a SSH session can remain idle before being closed
+	SshIdleTimeout = "ssh-idle-timeout"
+
+	// SshMaxTimeout defines the max duration a SSH session can remain open for
+	SshMaxTimeout = "ssh-max-timeout"
+
+	// SshLogUploaderBucketName is the bucket name to use for the SSH log uploader
+	SshLogUploaderBucketName = "bucket-name"
+
+	// SshLogUploaderRegionName is the AWS region name to use for the SSH log uploader
+	SshLogUploaderRegionName = "region-name"
+
+	// SshLogUploaderSecretID is the Secret id of SSH log uploader
+	SshLogUploaderSecretID = "secret-id"
+
+	// SshLogUploaderAccessKeyID is the Access key id of SSH log uploader
+	SshLogUploaderAccessKeyID = "access-key-id"
+
+	// SshLogUploaderSessionTokenID is the Session token of SSH log uploader
+	SshLogUploaderSessionTokenID = "session-token"
+
+	// SshLogUploaderS3URL is the S3 URL of SSH log uploader (e.g. don't use AWS s3 and use google storage bucket instead)
+	SshLogUploaderS3URL = "s3-url-host"
+
+	// HostKeyPath is the path of the dir to save SSH host keys too
+	HostKeyPath = "host-key-path"
+
+	// RpcTimeout is how long to wait for a Capnp RPC request to the edge
+	RpcTimeout = "rpc-timeout"
+
+	// WriteStreamTimeout sets if we should have a timeout when writing data to a stream towards the destination (edge/origin).
+	WriteStreamTimeout = "write-stream-timeout"
+
+	// QuicDisablePathMTUDiscovery sets if QUIC should not perform PTMU discovery and use a smaller (safe) packet size.
+	// Packets will then be at most 1252 (IPv4) / 1232 (IPv6) bytes in size.
+	// Note that this may result in packet drops for UDP proxying, since we expect being able to send at least 1280 bytes of inner packets.
+	QuicDisablePathMTUDiscovery = "quic-disable-pmtu-discovery"
+
+	// QuicConnLevelFlowControlLimit controls the max flow control limit allocated for a QUIC connection. This controls how much data is the
+	// receiver willing to buffer. Once the limit is reached, the sender will send a DATA_BLOCKED frame to indicate it has more data to write,
+	// but it's blocked by flow control
+	QuicConnLevelFlowControlLimit = "quic-connection-level-flow-control-limit"
+
+	// QuicStreamLevelFlowControlLimit is similar to quicConnLevelFlowControlLimit but for each QUIC stream. When the sender is blocked,
+	// it will send a STREAM_DATA_BLOCKED frame
+	QuicStreamLevelFlowControlLimit = "quic-stream-level-flow-control-limit"
+
+	// Ui is to enable launching cloudflared in interactive UI mode
+	Ui = "ui"
+
+	// ConnectorLabel is the command line flag to give a meaningful label to a specific connector
+	ConnectorLabel = "label"
+
+	// MaxActiveFlows is the command line flag to set the maximum number of flows that cloudflared can be processing at the same time
+	MaxActiveFlows = "max-active-flows"
+
+	// Tag is the command line flag to set custom tags used to identify this tunnel via added HTTP request headers to the origin
+	Tag = "tag"
+
+	// Protocol is the command line flag to set the protocol to use to connect to the Cloudflare Edge
+	Protocol = "protocol"
+
+	// PostQuantum is the command line flag to force the connection to Cloudflare Edge to use Post Quantum cryptography
+	PostQuantum = "post-quantum"
+
+	// Features is the command line flag to opt into various features that are still being developed or tested
+	Features = "features"
+
+	// EdgeIpVersion is the command line flag to set the Cloudflare Edge IP address version to connect with
+	EdgeIpVersion = "edge-ip-version"
+
+	// EdgeBindAddress is the command line flag to bind to IP address for outgoing connections to Cloudflare Edge
+	EdgeBindAddress = "edge-bind-address"
+
+	// Force is the command line flag to specify if you wish to force an action
+	Force = "force"
+
+	// Edge is the command line flag to set the address of the Cloudflare tunnel server. Only works in Cloudflare's internal testing environment
+	Edge = "edge"
+
+	// Region is the command line flag to set the Cloudflare Edge region to connect to
+	Region = "region"
+
+	// IsAutoUpdated is the command line flag to signal the new process that cloudflared has been autoupdated
+	IsAutoUpdated = "is-autoupdated"
+
+	// LBPool is the command line flag to set the name of the load balancing pool to add this origin to
+	LBPool = "lb-pool"
+
+	// Retries is the command line flag to set the maximum number of retries for connection/protocol errors
+	Retries = "retries"
+
+	// MaxEdgeAddrRetries is the command line flag to set the maximum number of times to retry on edge addrs before falling back to a lower protocol
+	MaxEdgeAddrRetries = "max-edge-addr-retries"
+
+	// GracePeriod is the command line flag to set the maximum amount of time that cloudflared waits to shut down if it is still serving requests
+	GracePeriod = "grace-period"
+
+	// ICMPV4Src is the command line flag to set the source address and the interface name to send/receive ICMPv4 messages
+	ICMPV4Src = "icmpv4-src"
+
+	// ICMPV6Src is the command line flag to set the source address and the interface name to send/receive ICMPv6 messages
+	ICMPV6Src = "icmpv6-src"
+
+	// ProxyDns is the command line flag to run DNS server over HTTPS
+	ProxyDns = "proxy-dns"
+
+	// Name is the command line to set the name of the tunnel
+	Name = "name"
+
+	// AutoUpdateFreq is the command line for setting the frequency that cloudflared checks for updates
+	AutoUpdateFreq = "autoupdate-freq"
+
+	// NoAutoUpdate is the command line flag to disable cloudflared from checking for updates
+	NoAutoUpdate = "no-autoupdate"
+
+	// LogLevel is the command line flag for the cloudflared logging level
+	LogLevel = "loglevel"
+
+	// LogLevelSSH is the command line flag for the cloudflared ssh logging level
+	LogLevelSSH = "log-level"
+
+	// TransportLogLevel is the command line flag for the transport logging level
+	TransportLogLevel = "transport-loglevel"
+
+	// LogFile is the command line flag to define the file where application logs will be stored
+	LogFile = "logfile"
+
+	// LogDirectory is the command line flag to define the directory where application logs will be stored.
+	LogDirectory = "log-directory"
+
+	// TraceOutput is the command line flag to set the name of trace output file
+	TraceOutput = "trace-output"
+
+	// OriginCert is the command line flag to define the path for the origin certificate used by cloudflared
+	OriginCert = "origincert"
+
+	// Metrics is the command line flag to define the address of the metrics server
+	Metrics = "metrics"
+
+	// MetricsUpdateFreq is the command line flag to define how frequently tunnel metrics are updated
+	MetricsUpdateFreq = "metrics-update-freq"
+
+	// ApiURL is the command line flag used to define the base URL of the API
+	ApiURL = "api-url"
+)
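The new `flags` package gives every CLI flag name a single named constant; the literal strings are the user-facing flag names, and the old per-file constants (such as the block deleted from the tunnel command later in this diff) are dropped in favor of these. On the command line the constants surface as ordinary flags, e.g. (values and tunnel name illustrative):

```bash
# HaConnections = "ha-connections", GracePeriod = "grace-period",
# LogLevel = "loglevel", LogFile = "logfile"
cloudflared tunnel run \
  --ha-connections 4 \
  --grace-period 30s \
  --loglevel debug \
  --logfile /var/log/cloudflared.log \
  my-tunnel
```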
@@ -3,11 +3,38 @@
 package main

 import (
+	"fmt"
 	"os"

 	cli "github.com/urfave/cli/v2"
+
+	"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
 )

 func runApp(app *cli.App, graceShutdownC chan struct{}) {
+	app.Commands = append(app.Commands, &cli.Command{
+		Name:  "service",
+		Usage: "Manages the cloudflared system service (not supported on this operating system)",
+		Subcommands: []*cli.Command{
+			{
+				Name:   "install",
+				Usage:  "Install cloudflared as a system service (not supported on this operating system)",
+				Action: cliutil.ConfiguredAction(installGenericService),
+			},
+			{
+				Name:   "uninstall",
+				Usage:  "Uninstall the cloudflared service (not supported on this operating system)",
+				Action: cliutil.ConfiguredAction(uninstallGenericService),
+			},
+		},
+	})
 	app.Run(os.Args)
 }
+
+func installGenericService(c *cli.Context) error {
+	return fmt.Errorf("service installation is not supported on this operating system")
+}
+
+func uninstallGenericService(c *cli.Context) error {
+	return fmt.Errorf("service uninstallation is not supported on this operating system")
+}
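This implements the release-notes entry "feat: emit explicit errors for the `service` command on unsupported OSes": previously a generic build simply lacked the `service` command, so users got an unhelpful command-not-found style failure; now the command is registered everywhere and fails with a clear message. Expected shape of a session on such a platform (the exact output formatting is a sketch):

```bash
$ cloudflared service install
service installation is not supported on this operating system
```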
@@ -120,7 +120,7 @@ func installLaunchd(c *cli.Context) error {
 		log.Info().Msg("Installing cloudflared client as an user launch agent. " +
 			"Note that cloudflared client will only run when the user is logged in. " +
 			"If you want to run cloudflared client at boot, install with root permission. " +
-			"For more information, visit https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/run-tunnel/run-as-service")
+			"For more information, visit https://developers.cloudflare.com/cloudflare-one/connections/connect-networks/configure-tunnels/local-management/as-a-service/macos/")
 	}
 	etPath, err := os.Executable()
 	if err != nil {
@@ -12,6 +12,7 @@ import (

 	"github.com/cloudflare/cloudflared/cmd/cloudflared/access"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
+	cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/proxydns"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/tail"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/tunnel"
@@ -105,7 +106,7 @@ func commands(version func(c *cli.Context)) []*cli.Command {
 			Usage: "specify if you wish to update to the latest beta version",
 		},
 		&cli.BoolFlag{
-			Name:   "force",
+			Name:   cfdflags.Force,
 			Usage:  "specify if you wish to force an upgrade to the latest version regardless of the current version",
 			Hidden: true,
 		},
@@ -1,13 +1,13 @@
 package main

 import (
-	"bufio"
 	"bytes"
+	"errors"
 	"fmt"
 	"io"
 	"os"
 	"os/exec"
-	"path"
+	"path/filepath"
 	"text/template"

 	homedir "github.com/mitchellh/go-homedir"
@@ -44,7 +44,7 @@ func (st *ServiceTemplate) Generate(args *ServiceTemplateArgs) error {
 		return err
 	}
 	if _, err = os.Stat(resolvedPath); err == nil {
-		return fmt.Errorf(serviceAlreadyExistsWarn(resolvedPath))
+		return errors.New(serviceAlreadyExistsWarn(resolvedPath))
 	}

 	var buffer bytes.Buffer
@@ -57,7 +57,7 @@ func (st *ServiceTemplate) Generate(args *ServiceTemplateArgs) error {
 		fileMode = st.FileMode
 	}

-	plistFolder := path.Dir(resolvedPath)
+	plistFolder := filepath.Dir(resolvedPath)
 	err = os.MkdirAll(plistFolder, 0o755)
 	if err != nil {
 		return fmt.Errorf("error creating %s: %v", plistFolder, err)
@@ -118,49 +118,6 @@ func ensureConfigDirExists(configDir string) error {
 	return err
 }

-// openFile opens the file at path. If create is set and the file exists, returns nil, true, nil
-func openFile(path string, create bool) (file *os.File, exists bool, err error) {
-	expandedPath, err := homedir.Expand(path)
-	if err != nil {
-		return nil, false, err
-	}
-	if create {
-		fileInfo, err := os.Stat(expandedPath)
-		if err == nil && fileInfo.Size() > 0 {
-			return nil, true, nil
-		}
-		file, err = os.OpenFile(expandedPath, os.O_RDWR|os.O_CREATE, 0600)
-	} else {
-		file, err = os.Open(expandedPath)
-	}
-	return file, false, err
-}
-
-func copyCredential(srcCredentialPath, destCredentialPath string) error {
-	destFile, exists, err := openFile(destCredentialPath, true)
-	if err != nil {
-		return err
-	} else if exists {
-		// credentials already exist, do nothing
-		return nil
-	}
-	defer destFile.Close()
-
-	srcFile, _, err := openFile(srcCredentialPath, false)
-	if err != nil {
-		return err
-	}
-	defer srcFile.Close()
-
-	// Copy certificate
-	_, err = io.Copy(destFile, srcFile)
-	if err != nil {
-		return fmt.Errorf("unable to copy %s to %s: %v", srcCredentialPath, destCredentialPath, err)
-	}
-
-	return nil
-}
-
 func copyFile(src, dest string) error {
 	srcFile, err := os.Open(src)
 	if err != nil {
@@ -187,36 +144,3 @@ func copyFile(src, dest string) error {
 	ok = true
 	return nil
 }
-
-func copyConfig(srcConfigPath, destConfigPath string) error {
-	// Copy or create config
-	destFile, exists, err := openFile(destConfigPath, true)
-	if err != nil {
-		return fmt.Errorf("cannot open %s with error: %s", destConfigPath, err)
-	} else if exists {
-		// config already exists, do nothing
-		return nil
-	}
-	defer destFile.Close()
-
-	srcFile, _, err := openFile(srcConfigPath, false)
-	if err != nil {
-		fmt.Println("Your service needs a config file that at least specifies the hostname option.")
-		fmt.Println("Type in a hostname now, or leave it blank and create the config file later.")
-		fmt.Print("Hostname: ")
-		reader := bufio.NewReader(os.Stdin)
-		input, _ := reader.ReadString('\n')
-		if input == "" {
-			return err
-		}
-		fmt.Fprintf(destFile, "hostname: %s\n", input)
-	} else {
-		defer srcFile.Close()
-		_, err = io.Copy(destFile, srcFile)
-		if err != nil {
-			return fmt.Errorf("unable to copy %s to %s: %v", srcConfigPath, destConfigPath, err)
-		}
-	}
-
-	return nil
-}
@@ -18,14 +18,12 @@ import (
 	"nhooyr.io/websocket"

 	"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
+	cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
 	"github.com/cloudflare/cloudflared/credentials"
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/cloudflare/cloudflared/management"
 )

-var (
-	buildInfo *cliutil.BuildInfo
-)
+var buildInfo *cliutil.BuildInfo

 func Init(bi *cliutil.BuildInfo) {
 	buildInfo = bi
@@ -56,7 +54,7 @@ func managementTokenCommand(c *cli.Context) error {
 	if err != nil {
 		return err
 	}
-	var tokenResponse = struct {
+	tokenResponse := struct {
 		Token string `json:"token"`
 	}{Token: token}
@@ -119,13 +117,13 @@ func buildTailCommand(subcommands []*cli.Command) *cli.Command {
 			Value: "",
 		},
 		&cli.StringFlag{
-			Name:    logger.LogLevelFlag,
+			Name:    cfdflags.LogLevel,
 			Value:   "info",
 			Usage:   "Application logging level {debug, info, warn, error, fatal}",
 			EnvVars: []string{"TUNNEL_LOGLEVEL"},
 		},
 		&cli.StringFlag{
-			Name:    credentials.OriginCertFlag,
+			Name:    cfdflags.OriginCert,
 			Usage:   "Path to the certificate generated for your origin when you run cloudflared login.",
 			EnvVars: []string{"TUNNEL_ORIGIN_CERT"},
 			Value:   credentials.FindDefaultOriginCertPath(),
@@ -169,7 +167,7 @@ func handleValidationError(resp *http.Response, log *zerolog.Logger) {
 // logger will be created to emit only against the os.Stderr as to not obstruct with normal output from
 // management requests
 func createLogger(c *cli.Context) *zerolog.Logger {
-	level, levelErr := zerolog.ParseLevel(c.String(logger.LogLevelFlag))
+	level, levelErr := zerolog.ParseLevel(c.String(cfdflags.LogLevel))
 	if levelErr != nil {
 		level = zerolog.InfoLevel
 	}
@@ -183,9 +181,10 @@ func createLogger(c *cli.Context) *zerolog.Logger {
 // parseFilters will attempt to parse provided filters to send to with the EventStartStreaming
 func parseFilters(c *cli.Context) (*management.StreamingFilters, error) {
 	var level *management.LogLevel
-	var events []management.LogEventType
 	var sample float64

+	events := make([]management.LogEventType, 0)
+
 	argLevel := c.String("level")
 	argEvents := c.StringSlice("event")
 	argSample := c.Float64("sample")
@@ -225,12 +224,12 @@ func parseFilters(c *cli.Context) (*management.StreamingFilters, error) {

 // getManagementToken will make a call to the Cloudflare API to acquire a management token for the requested tunnel.
 func getManagementToken(c *cli.Context, log *zerolog.Logger) (string, error) {
-	userCreds, err := credentials.Read(c.String(credentials.OriginCertFlag), log)
+	userCreds, err := credentials.Read(c.String(cfdflags.OriginCert), log)
 	if err != nil {
 		return "", err
 	}

-	client, err := userCreds.Client(c.String("api-url"), buildInfo.UserAgent(), log)
+	client, err := userCreds.Client(c.String(cfdflags.ApiURL), buildInfo.UserAgent(), log)
 	if err != nil {
 		return "", err
 	}
@@ -331,6 +330,7 @@ func Run(c *cli.Context) error {
 		header["cf-trace-id"] = []string{trace}
 	}
 	ctx := c.Context
+	// nolint: bodyclose
 	conn, resp, err := websocket.Dial(ctx, u.String(), &websocket.DialOptions{
 		HTTPHeader: header,
 	})
@@ -16,7 +16,7 @@ import (
 	"github.com/facebookgo/grace/gracenet"
 	"github.com/getsentry/sentry-go"
 	"github.com/google/uuid"
-	homedir "github.com/mitchellh/go-homedir"
+	"github.com/mitchellh/go-homedir"
 	"github.com/pkg/errors"
 	"github.com/rs/zerolog"
 	"github.com/urfave/cli/v2"
@@ -24,6 +24,7 @@ import (

 	"github.com/cloudflare/cloudflared/cfapi"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
+	cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/proxydns"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/updater"
 	"github.com/cloudflare/cloudflared/config"
@@ -47,61 +48,6 @@ import (
 const (
 	sentryDSN = "https://56a9c9fa5c364ab28f34b14f35ea0f1b:3e8827f6f9f740738eb11138f7bebb68@sentry.io/189878"

-	// ha-Connections specifies how many connections to make to the edge
-	haConnectionsFlag = "ha-connections"
-
-	// sshPortFlag is the port on localhost the cloudflared ssh server will run on
-	sshPortFlag = "local-ssh-port"
-
-	// sshIdleTimeoutFlag defines the duration a SSH session can remain idle before being closed
-	sshIdleTimeoutFlag = "ssh-idle-timeout"
-
-	// sshMaxTimeoutFlag defines the max duration a SSH session can remain open for
-	sshMaxTimeoutFlag = "ssh-max-timeout"
-
-	// bucketNameFlag is the bucket name to use for the SSH log uploader
-	bucketNameFlag = "bucket-name"
-
-	// regionNameFlag is the AWS region name to use for the SSH log uploader
-	regionNameFlag = "region-name"
-
-	// secretIDFlag is the Secret id of SSH log uploader
-	secretIDFlag = "secret-id"
-
-	// accessKeyIDFlag is the Access key id of SSH log uploader
-	accessKeyIDFlag = "access-key-id"
-
-	// sessionTokenIDFlag is the Session token of SSH log uploader
-	sessionTokenIDFlag = "session-token"
-
-	// s3URLFlag is the S3 URL of SSH log uploader (e.g. don't use AWS s3 and use google storage bucket instead)
-	s3URLFlag = "s3-url-host"
-
-	// hostKeyPath is the path of the dir to save SSH host keys too
-	hostKeyPath = "host-key-path"
-
-	// rpcTimeout is how long to wait for a Capnp RPC request to the edge
-	rpcTimeout = "rpc-timeout"
-
-	// writeStreamTimeout sets if we should have a timeout when writing data to a stream towards the destination (edge/origin).
-	writeStreamTimeout = "write-stream-timeout"
-
-	// quicDisablePathMTUDiscovery sets if QUIC should not perform PTMU discovery and use a smaller (safe) packet size.
-	// Packets will then be at most 1252 (IPv4) / 1232 (IPv6) bytes in size.
-	// Note that this may result in packet drops for UDP proxying, since we expect being able to send at least 1280 bytes of inner packets.
-	quicDisablePathMTUDiscovery = "quic-disable-pmtu-discovery"
-
-	// quicConnLevelFlowControlLimit controls the max flow control limit allocated for a QUIC connection. This controls how much data is the
-	// receiver willing to buffer. Once the limit is reached, the sender will send a DATA_BLOCKED frame to indicate it has more data to write,
-	// but it's blocked by flow control
-	quicConnLevelFlowControlLimit = "quic-connection-level-flow-control-limit"
-	// quicStreamLevelFlowControlLimit is similar to quicConnLevelFlowControlLimit but for each QUIC stream. When the sender is blocked,
-	// it will send a STREAM_DATA_BLOCKED frame
-	quicStreamLevelFlowControlLimit = "quic-stream-level-flow-control-limit"
-
-	// uiFlag is to enable launching cloudflared in interactive UI mode
-	uiFlag = "ui"
-
 	LogFieldCommand = "command"
 	LogFieldExpandedPath = "expandedPath"
 	LogFieldPIDPathname = "pidPathname"
@ -116,7 +62,6 @@ Eg. cloudflared tunnel --url localhost:8080/.
|
||||||
Please note that Quick Tunnels are meant to be ephemeral and should only be used for testing purposes.
|
Please note that Quick Tunnels are meant to be ephemeral and should only be used for testing purposes.
|
||||||
For production usage, we recommend creating Named Tunnels. (https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/tunnel-guide/)
|
For production usage, we recommend creating Named Tunnels. (https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/tunnel-guide/)
|
||||||
`
|
`
|
||||||
connectorLabelFlag = "label"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
|
@ -131,9 +76,9 @@ var (
|
||||||
// however this approach is not maintainble in the long-term.
|
// however this approach is not maintainble in the long-term.
|
||||||
nonSecretFlagsList = []string{
|
nonSecretFlagsList = []string{
|
||||||
"config",
|
"config",
|
||||||
"autoupdate-freq",
|
cfdflags.AutoUpdateFreq,
|
||||||
"no-autoupdate",
|
cfdflags.NoAutoUpdate,
|
||||||
"metrics",
|
cfdflags.Metrics,
|
||||||
"pidfile",
|
"pidfile",
|
||||||
"url",
|
"url",
|
||||||
"hello-world",
|
"hello-world",
|
||||||
|
@ -166,55 +111,55 @@ var (
|
||||||
"bastion",
|
"bastion",
|
||||||
"proxy-address",
|
"proxy-address",
|
||||||
"proxy-port",
|
"proxy-port",
|
||||||
"loglevel",
|
cfdflags.LogLevel,
|
||||||
"transport-loglevel",
|
cfdflags.TransportLogLevel,
|
||||||
"logfile",
|
cfdflags.LogFile,
|
||||||
"log-directory",
|
cfdflags.LogDirectory,
|
||||||
"trace-output",
|
cfdflags.TraceOutput,
|
||||||
"proxy-dns",
|
cfdflags.ProxyDns,
|
||||||
"proxy-dns-port",
|
"proxy-dns-port",
|
||||||
"proxy-dns-address",
|
"proxy-dns-address",
|
||||||
"proxy-dns-upstream",
|
"proxy-dns-upstream",
|
||||||
"proxy-dns-max-upstream-conns",
|
"proxy-dns-max-upstream-conns",
|
||||||
"proxy-dns-bootstrap",
|
"proxy-dns-bootstrap",
|
||||||
"is-autoupdated",
|
cfdflags.IsAutoUpdated,
|
||||||
"edge",
|
cfdflags.Edge,
|
||||||
"region",
|
cfdflags.Region,
|
||||||
"edge-ip-version",
|
cfdflags.EdgeIpVersion,
|
||||||
"edge-bind-address",
|
cfdflags.EdgeBindAddress,
|
||||||
"cacert",
|
"cacert",
|
||||||
"hostname",
|
"hostname",
|
||||||
"id",
|
"id",
|
||||||
"lb-pool",
|
cfdflags.LBPool,
|
||||||
"api-url",
|
cfdflags.ApiURL,
|
||||||
"metrics-update-freq",
|
cfdflags.MetricsUpdateFreq,
|
||||||
"tag",
|
cfdflags.Tag,
|
||||||
"heartbeat-interval",
|
"heartbeat-interval",
|
||||||
"heartbeat-count",
|
"heartbeat-count",
|
||||||
"max-edge-addr-retries",
|
cfdflags.MaxEdgeAddrRetries,
|
||||||
"retries",
|
cfdflags.Retries,
|
||||||
"ha-connections",
|
"ha-connections",
|
||||||
"rpc-timeout",
|
"rpc-timeout",
|
||||||
"write-stream-timeout",
|
"write-stream-timeout",
|
||||||
"quic-disable-pmtu-discovery",
|
"quic-disable-pmtu-discovery",
|
||||||
"quic-connection-level-flow-control-limit",
|
"quic-connection-level-flow-control-limit",
|
||||||
"quic-stream-level-flow-control-limit",
|
"quic-stream-level-flow-control-limit",
|
||||||
"label",
|
cfdflags.ConnectorLabel,
|
||||||
"grace-period",
|
cfdflags.GracePeriod,
|
||||||
"compression-quality",
|
"compression-quality",
|
||||||
"use-reconnect-token",
|
"use-reconnect-token",
|
||||||
"dial-edge-timeout",
|
"dial-edge-timeout",
|
||||||
"stdin-control",
|
"stdin-control",
|
||||||
"name",
|
cfdflags.Name,
|
||||||
"ui",
|
cfdflags.Ui,
|
||||||
"quick-service",
|
"quick-service",
|
||||||
"max-fetch-size",
|
"max-fetch-size",
|
||||||
"post-quantum",
|
cfdflags.PostQuantum,
|
||||||
"management-diagnostics",
|
"management-diagnostics",
|
||||||
"protocol",
|
cfdflags.Protocol,
|
||||||
"overwrite-dns",
|
"overwrite-dns",
|
||||||
"help",
|
"help",
|
||||||
"max-active-flows",
|
cfdflags.MaxActiveFlows,
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
|
|
||||||
|
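Reviewer note: the hunks above replace string-literal flag names with constants from the new cfdflags package (cmd/cloudflared/flags), so each flag is named in exactly one place. A minimal sketch of the pattern, assuming constant values that mirror the literals they replace in this diff:

package flags

// Each constant pins a CLI flag name that used to be repeated as a raw
// string at every call site; a typo now fails at compile time instead of
// silently referring to a nonexistent flag.
const (
	AutoUpdateFreq = "autoupdate-freq"
	NoAutoUpdate   = "no-autoupdate"
	Metrics        = "metrics"
	HaConnections  = "ha-connections"
	GracePeriod    = "grace-period"
)

Call sites then read c.Duration(flags.GracePeriod) rather than c.Duration("grace-period").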
@@ -263,7 +208,7 @@ then protect with Cloudflare Access).
 B) Locally reachable TCP/UDP-based private services to Cloudflare connected private users in the same account, e.g.,
 those enrolled to a Zero Trust WARP Client.

-You can manage your Tunnels via dash.teams.cloudflare.com. This approach will only require you to run a single command
+You can manage your Tunnels via one.dash.cloudflare.com. This approach will only require you to run a single command
 later in each machine where you wish to run a Tunnel.

 Alternatively, you can manage your Tunnels via the command line. Begin by obtaining a certificate to be able to do so:
@@ -299,7 +244,7 @@ func TunnelCommand(c *cli.Context) error {
 	// --name required
 	// --url or --hello-world required
 	// --hostname optional
-	if name := c.String("name"); name != "" {
+	if name := c.String(cfdflags.Name); name != "" {
 		hostname, err := validation.ValidateHostname(c.String("hostname"))
 		if err != nil {
 			return errors.Wrap(err, "Invalid hostname provided")
@@ -316,7 +261,7 @@ func TunnelCommand(c *cli.Context) error {
 	// A unauthenticated named tunnel hosted on <random>.<quick-tunnels-service>.com
 	// We don't support running proxy-dns and a quick tunnel at the same time as the same process
 	shouldRunQuickTunnel := c.IsSet("url") || c.IsSet(ingress.HelloWorldFlag)
-	if !c.IsSet("proxy-dns") && c.String("quick-service") != "" && shouldRunQuickTunnel {
+	if !c.IsSet(cfdflags.ProxyDns) && c.String("quick-service") != "" && shouldRunQuickTunnel {
 		return RunQuickTunnel(sc)
 	}

@@ -330,7 +275,7 @@ func TunnelCommand(c *cli.Context) error {
 		return errDeprecatedClassicTunnel
 	}

-	if c.IsSet("proxy-dns") {
+	if c.IsSet(cfdflags.ProxyDns) {
 		if shouldRunQuickTunnel {
 			return fmt.Errorf("running a quick tunnel with `proxy-dns` is not supported")
 		}
@@ -377,7 +322,7 @@ func runAdhocNamedTunnel(sc *subcommandContext, name, credentialsOutputPath string) error {

 func routeFromFlag(c *cli.Context) (route cfapi.HostnameRoute, ok bool) {
 	if hostname := c.String("hostname"); hostname != "" {
-		if lbPool := c.String("lb-pool"); lbPool != "" {
+		if lbPool := c.String(cfdflags.LBPool); lbPool != "" {
 			return cfapi.NewLBRoute(hostname, lbPool), true
 		}
 		return cfapi.NewDNSRoute(hostname, c.Bool(overwriteDNSFlagName)), true
@@ -407,7 +352,7 @@ func StartServer(
 		log.Info().Msg(config.ErrNoConfigFile.Error())
 	}

-	if c.IsSet("trace-output") {
+	if c.IsSet(cfdflags.TraceOutput) {
 		tmpTraceFile, err := os.CreateTemp("", "trace")
 		if err != nil {
 			log.Err(err).Msg("Failed to create new temporary file to save trace output")
@@ -419,7 +364,7 @@ func StartServer(
 			if err := tmpTraceFile.Close(); err != nil {
 				traceLog.Err(err).Msg("Failed to close temporary trace output file")
 			}
-			traceOutputFilepath := c.String("trace-output")
+			traceOutputFilepath := c.String(cfdflags.TraceOutput)
 			if err := os.Rename(tmpTraceFile.Name(), traceOutputFilepath); err != nil {
 				traceLog.
 					Err(err).
@@ -449,7 +394,7 @@ func StartServer(

 	go waitForSignal(graceShutdownC, log)

-	if c.IsSet("proxy-dns") {
+	if c.IsSet(cfdflags.ProxyDns) {
 		dnsReadySignal := make(chan struct{})
 		wg.Add(1)
 		go func() {
@@ -471,7 +416,7 @@ func StartServer(
 	go func() {
 		defer wg.Done()
 		autoupdater := updater.NewAutoUpdater(
-			c.Bool("no-autoupdate"), c.Duration("autoupdate-freq"), &listeners, log,
+			c.Bool(cfdflags.NoAutoUpdate), c.Duration(cfdflags.AutoUpdateFreq), &listeners, log,
 		)
 		errC <- autoupdater.Run(ctx)
 	}()
@@ -527,7 +472,7 @@ func StartServer(
 		c.Bool("management-diagnostics"),
 		serviceIP,
 		clientID,
-		c.String(connectorLabelFlag),
+		c.String(cfdflags.ConnectorLabel),
 		logger.ManagementLogger.Log,
 		logger.ManagementLogger,
 	)
@@ -579,7 +524,7 @@ func StartServer(
 		errC <- metrics.ServeMetrics(metricsListener, ctx, metricsConfig, log)
 	}()

-	reconnectCh := make(chan supervisor.ReconnectSignal, c.Int(haConnectionsFlag))
+	reconnectCh := make(chan supervisor.ReconnectSignal, c.Int(cfdflags.HaConnections))
 	if c.IsSet("stdin-control") {
 		log.Info().Msg("Enabling control through stdin")
 		go stdinControl(reconnectCh, log)
@@ -699,31 +644,31 @@ func tunnelFlags(shouldHide bool) []cli.Flag {
 	flags = append(flags, []cli.Flag{
 		credentialsFileFlag,
 		altsrc.NewBoolFlag(&cli.BoolFlag{
-			Name:   "is-autoupdated",
+			Name:   cfdflags.IsAutoUpdated,
 			Usage:  "Signal the new process that Cloudflare Tunnel connector has been autoupdated",
 			Value:  false,
 			Hidden: true,
 		}),
 		altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
-			Name:    "edge",
+			Name:    cfdflags.Edge,
 			Usage:   "Address of the Cloudflare tunnel server. Only works in Cloudflare's internal testing environment.",
 			EnvVars: []string{"TUNNEL_EDGE"},
 			Hidden:  true,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    "region",
+			Name:    cfdflags.Region,
 			Usage:   "Cloudflare Edge region to connect to. Omit or set to empty to connect to the global region.",
 			EnvVars: []string{"TUNNEL_REGION"},
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    "edge-ip-version",
+			Name:    cfdflags.EdgeIpVersion,
 			Usage:   "Cloudflare Edge IP address version to connect with. {4, 6, auto}",
 			EnvVars: []string{"TUNNEL_EDGE_IP_VERSION"},
 			Value:   "4",
 			Hidden:  false,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    "edge-bind-address",
+			Name:    cfdflags.EdgeBindAddress,
 			Usage:   "Bind to IP address for outgoing connections to Cloudflare Edge.",
 			EnvVars: []string{"TUNNEL_EDGE_BIND_ADDRESS"},
 			Hidden:  false,
@@ -747,7 +692,7 @@ func tunnelFlags(shouldHide bool) []cli.Flag {
 			Hidden: true,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    "lb-pool",
+			Name:    cfdflags.LBPool,
 			Usage:   "The name of a (new/existing) load balancing pool to add this origin to.",
 			EnvVars: []string{"TUNNEL_LB_POOL"},
 			Hidden:  shouldHide,
@@ -771,21 +716,21 @@ func tunnelFlags(shouldHide bool) []cli.Flag {
 			Hidden: true,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    "api-url",
+			Name:    cfdflags.ApiURL,
 			Usage:   "Base URL for Cloudflare API v4",
 			EnvVars: []string{"TUNNEL_API_URL"},
 			Value:   "https://api.cloudflare.com/client/v4",
 			Hidden:  true,
 		}),
 		altsrc.NewDurationFlag(&cli.DurationFlag{
-			Name:    "metrics-update-freq",
+			Name:    cfdflags.MetricsUpdateFreq,
 			Usage:   "Frequency to update tunnel metrics",
 			Value:   time.Second * 5,
 			EnvVars: []string{"TUNNEL_METRICS_UPDATE_FREQ"},
 			Hidden:  shouldHide,
 		}),
 		altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
-			Name:    "tag",
+			Name:    cfdflags.Tag,
 			Usage:   "Custom tags used to identify this tunnel via added HTTP request headers to the origin, in format `KEY=VALUE`. Multiple tags may be specified.",
 			EnvVars: []string{"TUNNEL_TAG"},
 			Hidden:  true,
@@ -804,64 +749,64 @@ func tunnelFlags(shouldHide bool) []cli.Flag {
 			Hidden: true,
 		}),
 		altsrc.NewIntFlag(&cli.IntFlag{
-			Name:   "max-edge-addr-retries",
+			Name:   cfdflags.MaxEdgeAddrRetries,
 			Usage:  "Maximum number of times to retry on edge addrs before falling back to a lower protocol",
 			Value:  8,
 			Hidden: true,
 		}),
 		// Note TUN-3758 , we use Int because UInt is not supported with altsrc
 		altsrc.NewIntFlag(&cli.IntFlag{
-			Name:    "retries",
+			Name:    cfdflags.Retries,
 			Value:   5,
 			Usage:   "Maximum number of retries for connection/protocol errors.",
 			EnvVars: []string{"TUNNEL_RETRIES"},
 			Hidden:  shouldHide,
 		}),
 		altsrc.NewIntFlag(&cli.IntFlag{
-			Name:   haConnectionsFlag,
+			Name:   cfdflags.HaConnections,
 			Value:  4,
 			Hidden: true,
 		}),
 		altsrc.NewDurationFlag(&cli.DurationFlag{
-			Name:   rpcTimeout,
+			Name:   cfdflags.RpcTimeout,
 			Value:  5 * time.Second,
 			Hidden: true,
 		}),
 		altsrc.NewDurationFlag(&cli.DurationFlag{
-			Name:    writeStreamTimeout,
+			Name:    cfdflags.WriteStreamTimeout,
 			EnvVars: []string{"TUNNEL_STREAM_WRITE_TIMEOUT"},
 			Usage:   "Use this option to add a stream write timeout for connections when writing towards the origin or edge. Default is 0 which disables the write timeout.",
 			Value:   0 * time.Second,
 			Hidden:  true,
 		}),
 		altsrc.NewBoolFlag(&cli.BoolFlag{
-			Name:    quicDisablePathMTUDiscovery,
+			Name:    cfdflags.QuicDisablePathMTUDiscovery,
 			EnvVars: []string{"TUNNEL_DISABLE_QUIC_PMTU"},
 			Usage:   "Use this option to disable PTMU discovery for QUIC connections. This will result in lower packet sizes. Not however, that this may cause instability for UDP proxying.",
 			Value:   false,
 			Hidden:  true,
 		}),
 		altsrc.NewIntFlag(&cli.IntFlag{
-			Name:    quicConnLevelFlowControlLimit,
+			Name:    cfdflags.QuicConnLevelFlowControlLimit,
 			EnvVars: []string{"TUNNEL_QUIC_CONN_LEVEL_FLOW_CONTROL_LIMIT"},
 			Usage:   "Use this option to change the connection-level flow control limit for QUIC transport.",
 			Value:   30 * (1 << 20), // 30 MB
 			Hidden:  true,
 		}),
 		altsrc.NewIntFlag(&cli.IntFlag{
-			Name:    quicStreamLevelFlowControlLimit,
+			Name:    cfdflags.QuicStreamLevelFlowControlLimit,
 			EnvVars: []string{"TUNNEL_QUIC_STREAM_LEVEL_FLOW_CONTROL_LIMIT"},
 			Usage:   "Use this option to change the connection-level flow control limit for QUIC transport.",
 			Value:   6 * (1 << 20), // 6 MB
 			Hidden:  true,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:  connectorLabelFlag,
+			Name:  cfdflags.ConnectorLabel,
 			Usage: "Use this option to give a meaningful label to a specific connector. When a tunnel starts up, a connector id unique to the tunnel is generated. This is a uuid. To make it easier to identify a connector, we will use the hostname of the machine the tunnel is running on along with the connector ID. This option exists if one wants to have more control over what their individual connectors are called.",
 			Value: "",
 		}),
 		altsrc.NewDurationFlag(&cli.DurationFlag{
-			Name:    "grace-period",
+			Name:    cfdflags.GracePeriod,
 			Usage:   "When cloudflared receives SIGINT/SIGTERM it will stop accepting new requests, wait for in-progress requests to terminate, then shutdown. Waiting for in-progress requests will timeout after this grace period, or when a second SIGTERM/SIGINT is received.",
 			Value:   time.Second * 30,
 			EnvVars: []string{"TUNNEL_GRACE_PERIOD"},
@@ -897,14 +842,14 @@ func tunnelFlags(shouldHide bool) []cli.Flag {
 			Value: false,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    "name",
+			Name:    cfdflags.Name,
 			Aliases: []string{"n"},
 			EnvVars: []string{"TUNNEL_NAME"},
 			Usage:   "Stable name to identify the tunnel. Using this flag will create, route and run a tunnel. For production usage, execute each command separately",
 			Hidden:  shouldHide,
 		}),
 		altsrc.NewBoolFlag(&cli.BoolFlag{
-			Name:   uiFlag,
+			Name:   cfdflags.Ui,
 			Usage:  "(depreciated) Launch tunnel UI. Tunnel logs are scrollable via 'j', 'k', or arrow keys.",
 			Value:  false,
 			Hidden: true,
@@ -922,7 +867,7 @@ func tunnelFlags(shouldHide bool) []cli.Flag {
 			Hidden: true,
 		}),
 		altsrc.NewBoolFlag(&cli.BoolFlag{
-			Name:    "post-quantum",
+			Name:    cfdflags.PostQuantum,
 			Usage:   "When given creates an experimental post-quantum secure tunnel",
 			Aliases: []string{"pq"},
 			EnvVars: []string{"TUNNEL_POST_QUANTUM"},
@@ -950,27 +895,27 @@ func configureCloudflaredFlags(shouldHide bool) []cli.Flag {
 			Hidden: shouldHide,
 		},
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    credentials.OriginCertFlag,
+			Name:    cfdflags.OriginCert,
 			Usage:   "Path to the certificate generated for your origin when you run cloudflared login.",
 			EnvVars: []string{"TUNNEL_ORIGIN_CERT"},
 			Value:   credentials.FindDefaultOriginCertPath(),
 			Hidden:  shouldHide,
 		}),
 		altsrc.NewDurationFlag(&cli.DurationFlag{
-			Name:   "autoupdate-freq",
+			Name:   cfdflags.AutoUpdateFreq,
 			Usage:  fmt.Sprintf("Autoupdate frequency. Default is %v.", updater.DefaultCheckUpdateFreq),
 			Value:  updater.DefaultCheckUpdateFreq,
 			Hidden: shouldHide,
 		}),
 		altsrc.NewBoolFlag(&cli.BoolFlag{
-			Name:    "no-autoupdate",
+			Name:    cfdflags.NoAutoUpdate,
 			Usage:   "Disable periodic check for updates, restarting the server with the new version.",
 			EnvVars: []string{"NO_AUTOUPDATE"},
 			Value:   false,
 			Hidden:  shouldHide,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:  "metrics",
+			Name:  cfdflags.Metrics,
 			Value: metrics.GetMetricsDefaultAddress(metrics.Runtime),
 			Usage: fmt.Sprintf(
 				`Listen address for metrics reporting. If no address is passed cloudflared will try to bind to %v.
@@ -1134,62 +1079,62 @@ func legacyTunnelFlag(msg string) string {
 func sshFlags(shouldHide bool) []cli.Flag {
 	return []cli.Flag{
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    sshPortFlag,
+			Name:    cfdflags.SshPort,
 			Usage:   "Localhost port that cloudflared SSH server will run on",
 			Value:   "2222",
 			EnvVars: []string{"LOCAL_SSH_PORT"},
 			Hidden:  true,
 		}),
 		altsrc.NewDurationFlag(&cli.DurationFlag{
-			Name:    sshIdleTimeoutFlag,
+			Name:    cfdflags.SshIdleTimeout,
 			Usage:   "Connection timeout after no activity",
 			EnvVars: []string{"SSH_IDLE_TIMEOUT"},
 			Hidden:  true,
 		}),
 		altsrc.NewDurationFlag(&cli.DurationFlag{
-			Name:    sshMaxTimeoutFlag,
+			Name:    cfdflags.SshMaxTimeout,
 			Usage:   "Absolute connection timeout",
 			EnvVars: []string{"SSH_MAX_TIMEOUT"},
 			Hidden:  true,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    bucketNameFlag,
+			Name:    cfdflags.SshLogUploaderBucketName,
 			Usage:   "Bucket name of where to upload SSH logs",
 			EnvVars: []string{"BUCKET_ID"},
 			Hidden:  true,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    regionNameFlag,
+			Name:    cfdflags.SshLogUploaderRegionName,
 			Usage:   "Region name of where to upload SSH logs",
 			EnvVars: []string{"REGION_ID"},
 			Hidden:  true,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    secretIDFlag,
+			Name:    cfdflags.SshLogUploaderSecretID,
 			Usage:   "Secret ID of where to upload SSH logs",
 			EnvVars: []string{"SECRET_ID"},
 			Hidden:  true,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    accessKeyIDFlag,
+			Name:    cfdflags.SshLogUploaderAccessKeyID,
 			Usage:   "Access Key ID of where to upload SSH logs",
 			EnvVars: []string{"ACCESS_CLIENT_ID"},
 			Hidden:  true,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    sessionTokenIDFlag,
+			Name:    cfdflags.SshLogUploaderSessionTokenID,
 			Usage:   "Session Token to use in the configuration of SSH logs uploading",
 			EnvVars: []string{"SESSION_TOKEN_ID"},
 			Hidden:  true,
 		}),
 		altsrc.NewStringFlag(&cli.StringFlag{
-			Name:    s3URLFlag,
+			Name:    cfdflags.SshLogUploaderS3URL,
 			Usage:   "S3 url of where to upload SSH logs",
 			EnvVars: []string{"S3_URL"},
 			Hidden:  true,
 		}),
 		altsrc.NewPathFlag(&cli.PathFlag{
-			Name:    hostKeyPath,
+			Name:    cfdflags.HostKeyPath,
 			Usage:   "Absolute path of directory to save SSH host keys in",
 			EnvVars: []string{"HOST_KEY_PATH"},
 			Hidden:  true,
@@ -1229,7 +1174,7 @@ func sshFlags(shouldHide bool) []cli.Flag {
 func configureProxyDNSFlags(shouldHide bool) []cli.Flag {
 	return []cli.Flag{
 		altsrc.NewBoolFlag(&cli.BoolFlag{
-			Name:    "proxy-dns",
+			Name:    cfdflags.ProxyDns,
 			Usage:   "Run a DNS over HTTPS proxy server.",
 			EnvVars: []string{"TUNNEL_DNS"},
 			Hidden:  shouldHide,
@@ -1327,7 +1272,7 @@ func nonSecretCliFlags(log *zerolog.Logger, cli *cli.Context, flagInclusionList
 	}

 	switch flag {
-	case logger.LogDirectoryFlag, logger.LogFileFlag:
+	case cfdflags.LogDirectory, cfdflags.LogFile:
 		{
 			absolute, err := filepath.Abs(value)
 			if err != nil {
@@ -1,15 +0,0 @@
-package tunnel
-
-import (
-	"testing"
-
-	"github.com/stretchr/testify/require"
-
-	"github.com/cloudflare/cloudflared/features"
-)
-
-func TestDedup(t *testing.T) {
-	expected := []string{"a", "b"}
-	actual := features.Dedup([]string{"a", "b", "a"})
-	require.ElementsMatch(t, expected, actual)
-}
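The deleted test above exercised features.Dedup, which removes duplicates from a string slice. For reference, a stand-in sketch of what such a helper does (not the cloudflared implementation):

package main

import "fmt"

// dedup returns the unique elements of in, preserving first-seen order.
func dedup(in []string) []string {
	seen := make(map[string]struct{}, len(in))
	out := make([]string, 0, len(in))
	for _, s := range in {
		if _, ok := seen[s]; !ok {
			seen[s] = struct{}{}
			out = append(out, s)
		}
	}
	return out
}

func main() {
	fmt.Println(dedup([]string{"a", "b", "a"})) // [a b]
}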
@@ -18,6 +18,7 @@ import (
 	"golang.org/x/term"

 	"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
+	"github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
 	"github.com/cloudflare/cloudflared/config"
 	"github.com/cloudflare/cloudflared/connection"
 	"github.com/cloudflare/cloudflared/edgediscovery"
@@ -33,12 +34,26 @@ import (
 const (
 	secretValue       = "*****"
 	icmpFunnelTimeout = time.Second * 10
+	fedRampRegion     = "fed" // const string denoting the region used to connect to FEDRamp servers
 )

 var (
 	secretFlags = [2]*altsrc.StringFlag{credentialsContentsFlag, tunnelTokenFlag}

-	configFlags = []string{"autoupdate-freq", "no-autoupdate", "retries", "protocol", "loglevel", "transport-loglevel", "origincert", "metrics", "metrics-update-freq", "edge-ip-version", "edge-bind-address", "max-active-flows"}
+	configFlags = []string{
+		flags.AutoUpdateFreq,
+		flags.NoAutoUpdate,
+		flags.Retries,
+		flags.Protocol,
+		flags.LogLevel,
+		flags.TransportLogLevel,
+		flags.OriginCert,
+		flags.Metrics,
+		flags.MetricsUpdateFreq,
+		flags.EdgeIpVersion,
+		flags.EdgeBindAddress,
+		flags.MaxActiveFlows,
+	}
 )

 func logClientOptions(c *cli.Context, log *zerolog.Logger) {
@@ -96,8 +111,8 @@ func isSecretEnvVar(key string) bool {
 }

 func dnsProxyStandAlone(c *cli.Context, namedTunnel *connection.TunnelProperties) bool {
-	return c.IsSet("proxy-dns") &&
-		!(c.IsSet("name") || // adhoc-named tunnel
+	return c.IsSet(flags.ProxyDns) &&
+		!(c.IsSet(flags.Name) || // adhoc-named tunnel
 			c.IsSet(ingress.HelloWorldFlag) || // quick or named tunnel
 			namedTunnel != nil) // named tunnel
 }
@@ -115,16 +130,17 @@ func prepareTunnelConfig(
 		return nil, nil, errors.Wrap(err, "can't generate connector UUID")
 	}
 	log.Info().Msgf("Generated Connector ID: %s", clientID)
-	tags, err := NewTagSliceFromCLI(c.StringSlice("tag"))
+	tags, err := NewTagSliceFromCLI(c.StringSlice(flags.Tag))
 	if err != nil {
 		log.Err(err).Msg("Tag parse failure")
 		return nil, nil, errors.Wrap(err, "Tag parse failure")
 	}
 	tags = append(tags, pogs.Tag{Name: "ID", Value: clientID.String()})

-	transportProtocol := c.String("protocol")
+	transportProtocol := c.String(flags.Protocol)
+	isPostQuantumEnforced := c.Bool(flags.PostQuantum)

-	featureSelector, err := features.NewFeatureSelector(ctx, namedTunnel.Credentials.AccountTag, c.StringSlice("features"), c.Bool("post-quantum"), log)
+	featureSelector, err := features.NewFeatureSelector(ctx, namedTunnel.Credentials.AccountTag, c.StringSlice(flags.Features), c.Bool(flags.PostQuantum), log)
 	if err != nil {
 		return nil, nil, errors.Wrap(err, "Failed to create feature selector")
 	}
@@ -150,7 +166,7 @@ func prepareTunnelConfig(
 		return nil, nil, err
 	}

-	protocolSelector, err := connection.NewProtocolSelector(transportProtocol, namedTunnel.Credentials.AccountTag, c.IsSet(TunnelTokenFlag), c.Bool("post-quantum"), edgediscovery.ProtocolPercentage, connection.ResolveTTL, log)
+	protocolSelector, err := connection.NewProtocolSelector(transportProtocol, namedTunnel.Credentials.AccountTag, c.IsSet(TunnelTokenFlag), isPostQuantumEnforced, edgediscovery.ProtocolPercentage, connection.ResolveTTL, log)
 	if err != nil {
 		return nil, nil, err
 	}
@@ -176,11 +192,11 @@ func prepareTunnelConfig(
 	if err != nil {
 		return nil, nil, err
 	}
-	edgeIPVersion, err := parseConfigIPVersion(c.String("edge-ip-version"))
+	edgeIPVersion, err := parseConfigIPVersion(c.String(flags.EdgeIpVersion))
 	if err != nil {
 		return nil, nil, err
 	}
-	edgeBindAddr, err := parseConfigBindAddress(c.String("edge-bind-address"))
+	edgeBindAddr, err := parseConfigBindAddress(c.String(flags.EdgeBindAddress))
 	if err != nil {
 		return nil, nil, err
 	}
@@ -193,36 +209,50 @@ func prepareTunnelConfig(
 		log.Warn().Str("edgeIPVersion", edgeIPVersion.String()).Err(err).Msg("Overriding edge-ip-version")
 	}

+	region := c.String(flags.Region)
+	endpoint := namedTunnel.Credentials.Endpoint
+	var resolvedRegion string
+	// set resolvedRegion to either the region passed as argument
+	// or to the endpoint in the credentials.
+	// Region and endpoint are interchangeable
+	if region != "" && endpoint != "" {
+		return nil, nil, fmt.Errorf("region provided with a token that has an endpoint")
+	} else if region != "" {
+		resolvedRegion = region
+	} else if endpoint != "" {
+		resolvedRegion = endpoint
+	}
+
 	tunnelConfig := &supervisor.TunnelConfig{
 		GracePeriod:     gracePeriod,
-		ReplaceExisting: c.Bool("force"),
+		ReplaceExisting: c.Bool(flags.Force),
 		OSArch:          info.OSArch(),
 		ClientID:        clientID.String(),
-		EdgeAddrs:       c.StringSlice("edge"),
-		Region:          c.String("region"),
+		EdgeAddrs:       c.StringSlice(flags.Edge),
+		Region:          resolvedRegion,
 		EdgeIPVersion:   edgeIPVersion,
 		EdgeBindAddr:    edgeBindAddr,
-		HAConnections:   c.Int(haConnectionsFlag),
-		IsAutoupdated:   c.Bool("is-autoupdated"),
-		LBPool:          c.String("lb-pool"),
+		HAConnections:   c.Int(flags.HaConnections),
+		IsAutoupdated:   c.Bool(flags.IsAutoUpdated),
+		LBPool:          c.String(flags.LBPool),
 		Tags:            tags,
 		Log:             log,
 		LogTransport:    logTransport,
 		Observer:        observer,
 		ReportedVersion: info.Version(),
 		// Note TUN-3758 , we use Int because UInt is not supported with altsrc
-		Retries:          uint(c.Int("retries")), // nolint: gosec
+		Retries:          uint(c.Int(flags.Retries)), // nolint: gosec
 		RunFromTerminal:  isRunningFromTerminal(),
 		NamedTunnel:      namedTunnel,
 		ProtocolSelector: protocolSelector,
 		EdgeTLSConfigs:   edgeTLSConfigs,
 		FeatureSelector:  featureSelector,
-		MaxEdgeAddrRetries:                  uint8(c.Int("max-edge-addr-retries")), // nolint: gosec
-		RPCTimeout:                          c.Duration(rpcTimeout),
-		WriteStreamTimeout:                  c.Duration(writeStreamTimeout),
-		DisableQUICPathMTUDiscovery:         c.Bool(quicDisablePathMTUDiscovery),
-		QUICConnectionLevelFlowControlLimit: c.Uint64(quicConnLevelFlowControlLimit),
-		QUICStreamLevelFlowControlLimit:     c.Uint64(quicStreamLevelFlowControlLimit),
+		MaxEdgeAddrRetries:                  uint8(c.Int(flags.MaxEdgeAddrRetries)), // nolint: gosec
+		RPCTimeout:                          c.Duration(flags.RpcTimeout),
+		WriteStreamTimeout:                  c.Duration(flags.WriteStreamTimeout),
+		DisableQUICPathMTUDiscovery:         c.Bool(flags.QuicDisablePathMTUDiscovery),
+		QUICConnectionLevelFlowControlLimit: c.Uint64(flags.QuicConnLevelFlowControlLimit),
+		QUICStreamLevelFlowControlLimit:     c.Uint64(flags.QuicStreamLevelFlowControlLimit),
 	}
 	icmpRouter, err := newICMPRouter(c, log)
 	if err != nil {
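Reviewer note: the added block resolves the effective region from either the --region flag or the endpoint carried in the tunnel credentials, and rejects the ambiguous case where both are set. A standalone sketch of that precedence logic, with the same behavior lifted out of the hunk:

package main

import (
	"errors"
	"fmt"
)

// resolveRegion mirrors the precedence above: an explicit region flag and a
// credentials endpoint are interchangeable, but providing both is rejected
// because they could disagree.
func resolveRegion(region, endpoint string) (string, error) {
	switch {
	case region != "" && endpoint != "":
		return "", errors.New("region provided with a token that has an endpoint")
	case region != "":
		return region, nil
	default:
		return endpoint, nil // may be empty, meaning the global region
	}
}

func main() {
	r, err := resolveRegion("", "fed")
	fmt.Println(r, err) // fed <nil>
}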
@@ -234,7 +264,7 @@ func prepareTunnelConfig(
 		Ingress:            &ingressRules,
 		WarpRouting:        ingress.NewWarpRoutingConfig(&cfg.WarpRouting),
 		ConfigurationFlags: parseConfigFlags(c),
-		WriteTimeout:       c.Duration(writeStreamTimeout),
+		WriteTimeout:       tunnelConfig.WriteStreamTimeout,
 	}
 	return tunnelConfig, orchestratorConfig, nil
 }
@@ -252,9 +282,9 @@ func parseConfigFlags(c *cli.Context) map[string]string {
 }

 func gracePeriod(c *cli.Context) (time.Duration, error) {
-	period := c.Duration("grace-period")
+	period := c.Duration(flags.GracePeriod)
 	if period > connection.MaxGracePeriod {
-		return time.Duration(0), fmt.Errorf("grace-period must be equal or less than %v", connection.MaxGracePeriod)
+		return time.Duration(0), fmt.Errorf("%s must be equal or less than %v", flags.GracePeriod, connection.MaxGracePeriod)
 	}
 	return period, nil
 }
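Note that the error message now interpolates the same constant used to register the flag, so the name in diagnostics can never drift from the name users type. A compact sketch of the validation under assumed stand-in values:

package main

import (
	"fmt"
	"time"
)

// gracePeriodFlag stands in for flags.GracePeriod; maxGracePeriod for
// connection.MaxGracePeriod (the real value lives in cloudflared).
const gracePeriodFlag = "grace-period"

const maxGracePeriod = 3 * time.Minute

// validateGracePeriod rejects periods above the ceiling, naming the flag
// via the shared constant rather than a duplicated literal.
func validateGracePeriod(period time.Duration) (time.Duration, error) {
	if period > maxGracePeriod {
		return 0, fmt.Errorf("%s must be equal or less than %v", gracePeriodFlag, maxGracePeriod)
	}
	return period, nil
}

func main() {
	_, err := validateGracePeriod(5 * time.Minute)
	fmt.Println(err)
}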
@@ -337,14 +367,14 @@ func newICMPRouter(c *cli.Context, logger *zerolog.Logger) (ingress.ICMPRouterServer, error) {
 }

 func determineICMPSources(c *cli.Context, logger *zerolog.Logger) (netip.Addr, netip.Addr, error) {
-	ipv4Src, err := determineICMPv4Src(c.String("icmpv4-src"), logger)
+	ipv4Src, err := determineICMPv4Src(c.String(flags.ICMPV4Src), logger)
 	if err != nil {
 		return netip.Addr{}, netip.Addr{}, errors.Wrap(err, "failed to determine IPv4 source address for ICMP proxy")
 	}

 	logger.Info().Msgf("ICMP proxy will use %s as source for IPv4", ipv4Src)

-	ipv6Src, zone, err := determineICMPv6Src(c.String("icmpv6-src"), logger, ipv4Src)
+	ipv6Src, zone, err := determineICMPv6Src(c.String(flags.ICMPV6Src), logger, ipv4Src)
 	if err != nil {
 		return netip.Addr{}, netip.Addr{}, errors.Wrap(err, "failed to determine IPv6 source address for ICMP proxy")
 	}

@@ -4,6 +4,7 @@ import (
 	"fmt"
 	"path/filepath"

+	cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
 	"github.com/cloudflare/cloudflared/config"
 	"github.com/cloudflare/cloudflared/credentials"

@@ -57,7 +58,7 @@ func newSearchByID(id uuid.UUID, c *cli.Context, log *zerolog.Logger, fs fileSystem) searchByID {
 }

 func (s searchByID) Path() (string, error) {
-	originCertPath := s.c.String(credentials.OriginCertFlag)
+	originCertPath := s.c.String(cfdflags.OriginCert)
 	originCertLog := s.log.With().
 		Str("originCertPath", originCertPath).
 		Logger()

@@ -67,7 +67,7 @@ func login(c *cli.Context) error {

 	path, ok, err := checkForExistingCert()
 	if ok {
-		fmt.Fprintf(os.Stdout, "You have an existing certificate at %s which login would overwrite.\nIf this is intentional, please move or delete that file then run this command again.\n", path)
+		log.Error().Err(err).Msgf("You have an existing certificate at %s which login would overwrite.\nIf this is intentional, please move or delete that file then run this command again.\n", path)
 		return nil
 	} else if err != nil {
 		return err
@@ -78,7 +78,8 @@ func login(c *cli.Context) error {
 		callbackStoreURL = c.String(callbackURLParamName)
 	)

-	if c.Bool(fedRAMPParamName) {
+	isFEDRamp := c.Bool(fedRAMPParamName)
+	if isFEDRamp {
 		baseloginURL = fedBaseLoginURL
 		callbackStoreURL = fedCallbackStoreURL
 	}
@@ -99,7 +100,23 @@ func login(c *cli.Context) error {
 		log,
 	)
 	if err != nil {
-		fmt.Fprintf(os.Stderr, "Failed to write the certificate due to the following error:\n%v\n\nYour browser will download the certificate instead. You will have to manually\ncopy it to the following path:\n\n%s\n", err, path)
+		log.Error().Err(err).Msgf("Failed to write the certificate.\n\nYour browser will download the certificate instead. You will have to manually\ncopy it to the following path:\n\n%s\n", path)
+		return err
+	}
+
+	cert, err := credentials.DecodeOriginCert(resourceData)
+	if err != nil {
+		log.Error().Err(err).Msg("failed to decode origin certificate")
+		return err
+	}
+
+	if isFEDRamp {
+		cert.Endpoint = credentials.FedEndpoint
+	}
+
+	resourceData, err = cert.EncodeOriginCert()
+	if err != nil {
+		log.Error().Err(err).Msg("failed to encode origin certificate")
 		return err
 	}

@@ -107,7 +124,7 @@ func login(c *cli.Context) error {
 		return errors.Wrap(err, fmt.Sprintf("error writing cert to %s", path))
 	}

-	fmt.Fprintf(os.Stdout, "You have successfully logged in.\nIf you wish to copy your credentials to a server, they have been saved to:\n%s\n", path)
+	log.Info().Msgf("You have successfully logged in.\nIf you wish to copy your credentials to a server, they have been saved to:\n%s\n", path)
 	return nil
 }
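Reviewer note: login now decodes the freshly downloaded origin certificate, stamps the FedRAMP endpoint into it when --fedramp was passed, and re-encodes it before writing to disk. A shape-only sketch of that flow, with hypothetical stand-ins for the internal credentials helpers (the real DecodeOriginCert/EncodeOriginCert live in cloudflared's credentials package and differ):

package main

import "fmt"

// originCert is a hypothetical stand-in for the internal certificate type.
type originCert struct {
	Endpoint string
	PEM      []byte
}

// decodeOriginCert is a no-op stand-in; the real helper parses the PEM.
func decodeOriginCert(b []byte) (*originCert, error) { return &originCert{PEM: b}, nil }

// encode is a no-op stand-in; the real helper serializes the cert back to PEM.
func (c *originCert) encode() ([]byte, error) { return c.PEM, nil }

// patchEndpoint mirrors the flow in the hunk: decode, conditionally set the
// FedRAMP endpoint, then re-encode for writing to disk.
func patchEndpoint(raw []byte, isFEDRamp bool) ([]byte, error) {
	cert, err := decodeOriginCert(raw)
	if err != nil {
		return nil, fmt.Errorf("failed to decode origin certificate: %w", err)
	}
	if isFEDRamp {
		cert.Endpoint = "fed" // stand-in for credentials.FedEndpoint
	}
	return cert.encode()
}

func main() {
	out, _ := patchEndpoint([]byte("-----BEGIN CERTIFICATE-----"), true)
	fmt.Println(len(out))
}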
@@ -11,6 +11,7 @@ import (
 	"github.com/google/uuid"
 	"github.com/pkg/errors"

+	"github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
 	"github.com/cloudflare/cloudflared/connection"
 )

@@ -82,13 +83,13 @@ func RunQuickTunnel(sc *subcommandContext) error {
 		sc.log.Info().Msg(line)
 	}

-	if !sc.c.IsSet("protocol") {
-		sc.c.Set("protocol", "quic")
+	if !sc.c.IsSet(flags.Protocol) {
+		_ = sc.c.Set(flags.Protocol, "quic")
 	}

 	// Override the number of connections used. Quick tunnels shouldn't be used for production usage,
 	// so, use a single connection instead.
-	sc.c.Set(haConnectionsFlag, "1")
+	_ = sc.c.Set(flags.HaConnections, "1")
 	return StartServer(
 		sc.c,
 		buildInfo,
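The `_ =` assignments above explicitly discard the error from cli.Context.Set, which satisfies errcheck-style linters while documenting that a failure to apply the quick-tunnel defaults is intentionally ignored. A minimal runnable sketch of the same pattern with urfave/cli/v2 (flag name chosen to match the diff):

package main

import (
	"fmt"
	"os"

	"github.com/urfave/cli/v2"
)

func main() {
	app := &cli.App{
		Flags: []cli.Flag{
			&cli.StringFlag{Name: "protocol"},
		},
		Action: func(c *cli.Context) error {
			if !c.IsSet("protocol") {
				// Explicitly discard the error: if the flag cannot be set,
				// the built-in default simply stays in effect.
				_ = c.Set("protocol", "quic")
			}
			fmt.Println("protocol:", c.String("protocol"))
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}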
@@ -9,22 +9,26 @@ import (
 	"strings"

 	"github.com/google/uuid"
+	"github.com/mitchellh/go-homedir"
 	"github.com/pkg/errors"
 	"github.com/rs/zerolog"
 	"github.com/urfave/cli/v2"

 	"github.com/cloudflare/cloudflared/cfapi"
+	cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
 	"github.com/cloudflare/cloudflared/connection"
 	"github.com/cloudflare/cloudflared/credentials"
 	"github.com/cloudflare/cloudflared/logger"
 )

-type errInvalidJSONCredential struct {
+const fedRampBaseApiURL = "https://api.fed.cloudflare.com/client/v4"
+
+type invalidJSONCredentialError struct {
 	err  error
 	path string
 }

-func (e errInvalidJSONCredential) Error() string {
+func (e invalidJSONCredentialError) Error() string {
 	return "Invalid JSON when parsing tunnel credentials file"
 }

@@ -51,8 +55,13 @@ func newSubcommandContext(c *cli.Context) (*subcommandContext, error) {
 // Returns something that can find the given tunnel's credentials file.
 func (sc *subcommandContext) credentialFinder(tunnelID uuid.UUID) CredFinder {
 	if path := sc.c.String(CredFileFlag); path != "" {
+		// Expand path if CredFileFlag contains `~`
+		absPath, err := homedir.Expand(path)
+		if err != nil {
 			return newStaticPath(path, sc.fs)
 		}
+		return newStaticPath(absPath, sc.fs)
+	}
 	return newSearchByID(tunnelID, sc.c, sc.log, sc.fs)
 }
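The credentialFinder hunk makes `--credentials-file ~/cred.json` work by expanding the tilde before use, falling back to the raw path if expansion fails. A minimal runnable sketch of the same call with github.com/mitchellh/go-homedir:

package main

import (
	"fmt"

	"github.com/mitchellh/go-homedir"
)

func main() {
	// Expand turns "~/cred.json" into an absolute path rooted at the
	// user's home directory; on failure the original path is kept,
	// matching the fallback branch in the hunk above.
	path := "~/cred.json"
	abs, err := homedir.Expand(path)
	if err != nil {
		abs = path
	}
	fmt.Println(abs)
}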
@@ -64,7 +73,16 @@ func (sc *subcommandContext) client() (cfapi.Client, error) {
 	if err != nil {
 		return nil, err
 	}
-	sc.tunnelstoreClient, err = cred.Client(sc.c.String("api-url"), buildInfo.UserAgent(), sc.log)
+
+	var apiURL string
+	if cred.IsFEDEndpoint() {
+		sc.log.Info().Str("api-url", fedRampBaseApiURL).Msg("using fedramp base api")
+		apiURL = fedRampBaseApiURL
+	} else {
+		apiURL = sc.c.String(cfdflags.ApiURL)
+	}
+
+	sc.tunnelstoreClient, err = cred.Client(apiURL, buildInfo.UserAgent(), sc.log)
 	if err != nil {
 		return nil, err
 	}
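Here the API base URL is chosen from the credential itself: FED credentials pin the client to the FedRAMP host, everything else honors the --api-url flag. A sketch of that branch in isolation (isFED stands in for cred.IsFEDEndpoint(), flagURL for c.String(cfdflags.ApiURL)):

package main

import "fmt"

const fedRampBaseApiURL = "https://api.fed.cloudflare.com/client/v4"

// pickAPIURL mirrors the branch above: FedRAMP credentials override any
// configured API base, so a FED tunnel can never accidentally talk to the
// commercial API.
func pickAPIURL(isFED bool, flagURL string) string {
	if isFED {
		return fedRampBaseApiURL
	}
	return flagURL
}

func main() {
	fmt.Println(pickAPIURL(true, "https://api.cloudflare.com/client/v4"))
}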
@@ -73,7 +91,7 @@ func (sc *subcommandContext) client() (cfapi.Client, error) {

 func (sc *subcommandContext) credential() (*credentials.User, error) {
 	if sc.userCredential == nil {
-		uc, err := credentials.Read(sc.c.String(credentials.OriginCertFlag), sc.log)
+		uc, err := credentials.Read(sc.c.String(cfdflags.OriginCert), sc.log)
 		if err != nil {
 			return nil, err
 		}
@@ -94,13 +112,13 @@ func (sc *subcommandContext) readTunnelCredentials(credFinder CredFinder) (connection.Credentials, error) {

 	var credentials connection.Credentials
 	if err = json.Unmarshal(body, &credentials); err != nil {
-		if strings.HasSuffix(filePath, ".pem") {
+		if filepath.Ext(filePath) == ".pem" {
 			return connection.Credentials{}, fmt.Errorf("The tunnel credentials file should be .json but you gave a .pem. " +
 				"The tunnel credentials file was originally created by `cloudflared tunnel create`. " +
 				"You may have accidentally used the filepath to cert.pem, which is generated by `cloudflared tunnel " +
 				"login`.")
 		}
-		return connection.Credentials{}, errInvalidJSONCredential{path: filePath, err: err}
+		return connection.Credentials{}, invalidJSONCredentialError{path: filePath, err: err}
 	}
 	return credentials, nil
 }
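The swap from strings.HasSuffix to filepath.Ext is behavior-preserving for this check but states the intent directly: we are asking for the file's extension, not testing an arbitrary string suffix. A quick demonstration of what Ext returns:

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// filepath.Ext returns the suffix beginning at the final dot of the
	// last path element, which reads as "what is this file's extension".
	for _, p := range []string{"cert.pem", "creds.json", "archive.tar.gz"} {
		fmt.Printf("%-16s -> %q\n", p, filepath.Ext(p))
	}
}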
@@ -122,7 +140,7 @@ func (sc *subcommandContext) create(name string, credentialsFilePath string, sec
 	if err != nil {
 		return nil, errors.Wrap(err, "Couldn't decode tunnel secret from base64")
 	}
-	tunnelSecret = []byte(decodedSecret)
+	tunnelSecret = decodedSecret
 	if len(tunnelSecret) < 32 {
 		return nil, errors.New("Decoded tunnel secret must be at least 32 bytes long")
 	}

@@ -160,7 +178,7 @@ func (sc *subcommandContext) create(name string, credentialsFilePath string, sec
 		errorLines = append(errorLines, fmt.Sprintf("Cloudflared tried to delete the tunnel for you, but encountered an error. You should use `cloudflared tunnel delete %v` to delete the tunnel yourself, because the tunnel can't be run without the tunnelfile.", tunnel.ID))
 		errorLines = append(errorLines, fmt.Sprintf("The delete tunnel error is: %v", deleteErr))
 	} else {
-		errorLines = append(errorLines, fmt.Sprintf("The tunnel was deleted, because the tunnel can't be run without the credentials file"))
+		errorLines = append(errorLines, "The tunnel was deleted, because the tunnel can't be run without the credentials file")
 	}
 	errorMsg := strings.Join(errorLines, "\n")
 	return nil, errors.New(errorMsg)

@@ -189,7 +207,7 @@ func (sc *subcommandContext) list(filter *cfapi.TunnelFilter) ([]*cfapi.Tunnel, 
 }
 
 func (sc *subcommandContext) delete(tunnelIDs []uuid.UUID) error {
-	forceFlagSet := sc.c.Bool("force")
+	forceFlagSet := sc.c.Bool(cfdflags.Force)
 
 	client, err := sc.client()
 	if err != nil {

@@ -229,7 +247,7 @@ func (sc *subcommandContext) findCredentials(tunnelID uuid.UUID) (connection.Cre
 	var err error
 	if credentialsContents := sc.c.String(CredContentsFlag); credentialsContents != "" {
 		if err = json.Unmarshal([]byte(credentialsContents), &credentials); err != nil {
-			err = errInvalidJSONCredential{path: "TUNNEL_CRED_CONTENTS", err: err}
+			err = invalidJSONCredentialError{path: "TUNNEL_CRED_CONTENTS", err: err}
 		}
 	} else {
 		credFinder := sc.credentialFinder(tunnelID)

@@ -245,7 +263,7 @@ func (sc *subcommandContext) findCredentials(tunnelID uuid.UUID) (connection.Cre
 func (sc *subcommandContext) run(tunnelID uuid.UUID) error {
 	credentials, err := sc.findCredentials(tunnelID)
 	if err != nil {
-		if e, ok := err.(errInvalidJSONCredential); ok {
+		if e, ok := err.(invalidJSONCredentialError); ok {
 			sc.log.Error().Msgf("The credentials file at %s contained invalid JSON. This is probably caused by passing the wrong filepath. Reminder: the credentials file is a .json file created via `cloudflared tunnel create`.", e.path)
 			sc.log.Error().Msgf("Invalid JSON when parsing credentials file: %s", e.err.Error())
 		}
@@ -16,15 +16,16 @@ import (
 	"time"
 
 	"github.com/google/uuid"
-	homedir "github.com/mitchellh/go-homedir"
+	"github.com/mitchellh/go-homedir"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli/v2"
 	"github.com/urfave/cli/v2/altsrc"
 	"golang.org/x/net/idna"
-	yaml "gopkg.in/yaml.v3"
+	"gopkg.in/yaml.v3"
 
 	"github.com/cloudflare/cloudflared/cfapi"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
+	"github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/updater"
 	"github.com/cloudflare/cloudflared/config"
 	"github.com/cloudflare/cloudflared/connection"

@@ -40,6 +41,7 @@ const (
 	CredFileFlag         = "credentials-file"
 	CredContentsFlag     = "credentials-contents"
 	TunnelTokenFlag      = "token"
+	TunnelTokenFileFlag  = "token-file"
 	overwriteDNSFlagName = "overwrite-dns"
 	noDiagLogsFlagName    = "no-diag-logs"
 	noDiagMetricsFlagName = "no-diag-metrics"

@@ -48,7 +50,6 @@ const (
 	noDiagNetworkFlagName   = "no-diag-network"
 	diagContainerIDFlagName = "diag-container-id"
 	diagPodFlagName         = "diag-pod-id"
-	metricsFlagName         = "metrics"
 
 	LogFieldTunnelID = "tunnelID"
 )

@@ -60,7 +61,7 @@ var (
 		Usage: "Include deleted tunnels in the list",
 	}
 	listNameFlag = &cli.StringFlag{
-		Name:    "name",
+		Name:    flags.Name,
 		Aliases: []string{"n"},
 		Usage:   "List tunnels with the given `NAME`",
 	}

@@ -108,7 +109,7 @@ var (
 		EnvVars: []string{"TUNNEL_LIST_INVERT_SORT"},
 	}
 	featuresFlag = altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
-		Name:    "features",
+		Name:    flags.Features,
 		Aliases: []string{"F"},
 		Usage:   "Opt into various features that are still being developed or tested.",
 	})
@@ -126,18 +127,23 @@ var (
 	})
 	tunnelTokenFlag = altsrc.NewStringFlag(&cli.StringFlag{
 		Name:    TunnelTokenFlag,
-		Usage:   "The Tunnel token. When provided along with credentials, this will take precedence.",
+		Usage:   "The Tunnel token. When provided along with credentials, this will take precedence. Also takes precedence over token-file",
 		EnvVars: []string{"TUNNEL_TOKEN"},
 	})
+	tunnelTokenFileFlag = altsrc.NewStringFlag(&cli.StringFlag{
+		Name:    TunnelTokenFileFlag,
+		Usage:   "Filepath at which to read the tunnel token. When provided along with credentials, this will take precedence.",
+		EnvVars: []string{"TUNNEL_TOKEN_FILE"},
+	})
 	forceDeleteFlag = &cli.BoolFlag{
-		Name:    "force",
+		Name:    flags.Force,
 		Aliases: []string{"f"},
 		Usage: "Deletes a tunnel even if tunnel is connected and it has dependencies associated to it. (eg. IP routes)." +
 			" It is not possible to delete tunnels that have connections or non-deleted dependencies, without this flag.",
 		EnvVars: []string{"TUNNEL_RUN_FORCE_OVERWRITE"},
 	}
 	selectProtocolFlag = altsrc.NewStringFlag(&cli.StringFlag{
-		Name:    "protocol",
+		Name:    flags.Protocol,
 		Value:   connection.AutoSelectFlag,
 		Aliases: []string{"p"},
 		Usage:   fmt.Sprintf("Protocol implementation to connect with Cloudflare's edge network. %s", connection.AvailableProtocolFlagMessage),

@@ -145,7 +151,7 @@ var (
 		Hidden: true,
 	})
 	postQuantumFlag = altsrc.NewBoolFlag(&cli.BoolFlag{
-		Name:    "post-quantum",
+		Name:    flags.PostQuantum,
 		Usage:   "When given creates an experimental post-quantum secure tunnel",
 		Aliases: []string{"pq"},
 		EnvVars: []string{"TUNNEL_POST_QUANTUM"},

@@ -181,17 +187,17 @@ var (
 		EnvVars: []string{"TUNNEL_CREATE_SECRET"},
 	}
 	icmpv4SrcFlag = &cli.StringFlag{
-		Name:    "icmpv4-src",
+		Name:    flags.ICMPV4Src,
 		Usage:   "Source address to send/receive ICMPv4 messages. If not provided cloudflared will dial a local address to determine the source IP or fallback to 0.0.0.0.",
 		EnvVars: []string{"TUNNEL_ICMPV4_SRC"},
 	}
 	icmpv6SrcFlag = &cli.StringFlag{
-		Name:    "icmpv6-src",
+		Name:    flags.ICMPV6Src,
 		Usage:   "Source address and the interface name to send/receive ICMPv6 messages. If not provided cloudflared will dial a local address to determine the source IP or fallback to ::.",
 		EnvVars: []string{"TUNNEL_ICMPV6_SRC"},
 	}
 	metricsFlag = &cli.StringFlag{
-		Name:  metricsFlagName,
+		Name:  flags.Metrics,
 		Usage: "The metrics server address i.e.: 127.0.0.1:12345. If your instance is running in a Docker/Kubernetes environment you need to setup port forwarding for your application.",
 		Value: "",
 	}
@@ -231,7 +237,7 @@ var (
 		Value: false,
 	}
 	maxActiveFlowsFlag = &cli.Uint64Flag{
-		Name:    "max-active-flows",
+		Name:    flags.MaxActiveFlows,
 		Usage:   "Overrides the remote configuration for max active private network flows (TCP/UDP) that this cloudflared instance supports",
 		EnvVars: []string{"TUNNEL_MAX_ACTIVE_FLOWS"},
 	}

@@ -337,7 +343,7 @@ func listCommand(c *cli.Context) error {
 	if !c.Bool("show-deleted") {
 		filter.NoDeleted()
 	}
-	if name := c.String("name"); name != "" {
+	if name := c.String(flags.Name); name != "" {
 		filter.ByName(name)
 	}
 	if namePrefix := c.String("name-prefix"); namePrefix != "" {

@@ -467,9 +473,9 @@ func buildReadyCommand() *cli.Command {
 }
 
 func readyCommand(c *cli.Context) error {
-	metricsOpts := c.String("metrics")
-	if !c.IsSet("metrics") {
-		return fmt.Errorf("--metrics has to be provided")
+	metricsOpts := c.String(flags.Metrics)
+	if !c.IsSet(flags.Metrics) {
+		return errors.New("--metrics has to be provided")
 	}
 
 	requestURL := fmt.Sprintf("http://%s/ready", metricsOpts)
@@ -708,6 +714,7 @@ func buildRunCommand() *cli.Command {
 		selectProtocolFlag,
 		featuresFlag,
 		tunnelTokenFlag,
+		tunnelTokenFileFlag,
 		icmpv4SrcFlag,
 		icmpv6SrcFlag,
 		maxActiveFlowsFlag,

@@ -748,12 +755,22 @@ func runCommand(c *cli.Context) error {
 			"your origin will not be reachable. You should remove the `hostname` property to avoid this warning.")
 	}
 
+	tokenStr := c.String(TunnelTokenFlag)
+	// Check if tokenStr is blank before checking for tokenFile
+	if tokenStr == "" {
+		if tokenFile := c.String(TunnelTokenFileFlag); tokenFile != "" {
+			data, err := os.ReadFile(tokenFile)
+			if err != nil {
+				return cliutil.UsageError("Failed to read token file: " + err.Error())
+			}
+			tokenStr = strings.TrimSpace(string(data))
+		}
+	}
 	// Check if token is provided and if not use default tunnelID flag method
-	if tokenStr := c.String(TunnelTokenFlag); tokenStr != "" {
+	if tokenStr != "" {
 		if token, err := ParseToken(tokenStr); err == nil {
 			return sc.runWithCredentials(token.Credentials())
 		}
 
 		return cliutil.UsageError("Provided Tunnel token is not valid.")
 	} else {
 		tunnelRef := c.Args().First()
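Editor's note: the `runCommand` hunk establishes a precedence order: an explicit `--token` wins, then `--token-file` (whitespace-trimmed), then the tunnel name/ID path. A minimal sketch of that resolution, assuming nothing beyond what the diff shows; `resolveToken` is a hypothetical helper, not cloudflared API:

```go
package main

import (
	"fmt"
	"os"
	"strings"
)

// resolveToken mirrors the order above: --token takes precedence over
// --token-file; an empty result means "fall back to tunnel name/ID".
func resolveToken(tokenFlag, tokenFilePath string) (string, error) {
	if tokenFlag != "" {
		return tokenFlag, nil
	}
	if tokenFilePath == "" {
		return "", nil
	}
	data, err := os.ReadFile(tokenFilePath)
	if err != nil {
		return "", fmt.Errorf("failed to read token file: %w", err)
	}
	// Files written by `echo` usually carry a trailing newline; trim it.
	return strings.TrimSpace(string(data)), nil
}

func main() {
	tok, err := resolveToken("", "/tmp/tunnel-token")
	fmt.Println(tok, err)
}
```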
@@ -1079,7 +1096,7 @@ func diagCommand(ctx *cli.Context) error {
 	log := sctx.log
 	options := diagnostic.Options{
 		KnownAddresses: metrics.GetMetricsKnownAddresses(metrics.Runtime),
-		Address:        sctx.c.String(metricsFlagName),
+		Address:        sctx.c.String(flags.Metrics),
 		ContainerID:    sctx.c.String(diagContainerIDFlagName),
 		PodID:          sctx.c.String(diagPodFlagName),
 		Toggles: diagnostic.Toggles{

@@ -22,7 +22,7 @@ var (
 		Usage: "The ID or name of the virtual network to which the route is associated to.",
 	}
 
-	routeAddError = errors.New("You must supply exactly one argument, the ID or CIDR of the route you want to delete")
+	errAddRoute = errors.New("You must supply exactly one argument, the ID or CIDR of the route you want to delete")
 )
 
 func buildRouteIPSubcommand() *cli.Command {

@@ -32,7 +32,7 @@ func buildRouteIPSubcommand() *cli.Command {
 		UsageText: "cloudflared tunnel [--config FILEPATH] route COMMAND [arguments...]",
 		Description: `cloudflared can provision routes for any IP space in your corporate network. Users enrolled in
 your Cloudflare for Teams organization can reach those IPs through the Cloudflare WARP
-client. You can then configure L7/L4 filtering on https://dash.teams.cloudflare.com to
+client. You can then configure L7/L4 filtering on https://one.dash.cloudflare.com to
 determine who can reach certain routes.
 By default IP routes all exist within a single virtual network. If you use the same IP
 space(s) in different physical private networks, all meant to be reachable via IP routes,

@@ -187,7 +187,7 @@ func deleteRouteCommand(c *cli.Context) error {
 	}
 
 	if c.NArg() != 1 {
-		return routeAddError
+		return errAddRoute
 	}
 
 	var routeId uuid.UUID

@@ -195,7 +195,7 @@ func deleteRouteCommand(c *cli.Context) error {
 	if err != nil {
 		_, network, err := net.ParseCIDR(c.Args().First())
 		if err != nil || network == nil {
-			return routeAddError
+			return errAddRoute
 		}
 
 		var vnetId *uuid.UUID
@@ -15,13 +15,14 @@ import (
 	"golang.org/x/term"
 
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
+	cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
 	"github.com/cloudflare/cloudflared/config"
 	"github.com/cloudflare/cloudflared/logger"
 )
 
 const (
 	DefaultCheckUpdateFreq        = time.Hour * 24
-	noUpdateInShellMessage        = "cloudflared will not automatically update when run from the shell. To enable auto-updates, run cloudflared as a service: https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/run-tunnel/as-a-service/"
+	noUpdateInShellMessage        = "cloudflared will not automatically update when run from the shell. To enable auto-updates, run cloudflared as a service: https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/configure-tunnels/local-management/as-a-service/"
 	noUpdateOnWindowsMessage      = "cloudflared will not automatically update on Windows systems."
 	noUpdateManagedPackageMessage = "cloudflared will not automatically update if installed by a package manager."
 	isManagedInstallFile          = ".installedFromPackageManager"

@@ -38,6 +39,7 @@ var (
 
 // BinaryUpdated implements ExitCoder interface, the app will exit with status code 11
 // https://pkg.go.dev/github.com/urfave/cli/v2?tab=doc#ExitCoder
+// nolint: errname
 type statusSuccess struct {
 	newVersion string
 }

@@ -50,16 +52,16 @@ func (u *statusSuccess) ExitCode() int {
 	return 11
 }
 
-// UpdateErr implements ExitCoder interface, the app will exit with status code 10
-type statusErr struct {
+// statusError implements ExitCoder interface, the app will exit with status code 10
+type statusError struct {
 	err error
 }
 
-func (e *statusErr) Error() string {
+func (e *statusError) Error() string {
 	return fmt.Sprintf("failed to update cloudflared: %v", e.err)
 }
 
-func (e *statusErr) ExitCode() int {
+func (e *statusError) ExitCode() int {
 	return 10
 }
 
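Editor's note: the rename above keeps the type satisfying urfave/cli's `ExitCoder` interface, which is what turns the returned error into the process exit status. A compact, runnable sketch of that mechanism (the error message and code 10 come from the diff; the demo app is illustrative):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/urfave/cli/v2"
)

type statusError struct{ err error }

func (e *statusError) Error() string {
	return fmt.Sprintf("failed to update cloudflared: %v", e.err)
}

func (e *statusError) ExitCode() int { return 10 }

// Compile-time check that the renamed type still satisfies cli.ExitCoder.
var _ cli.ExitCoder = (*statusError)(nil)

func main() {
	app := &cli.App{
		Action: func(*cli.Context) error {
			return &statusError{err: errors.New("download failed")}
		},
	}
	// App.Run hands ExitCoder errors to cli.HandleExitCoder, so this
	// process exits with status 10 after printing the message.
	_ = app.Run([]string{"demo"})
}
```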
@@ -79,7 +81,7 @@ type UpdateOutcome struct {
 }
 
 func (uo *UpdateOutcome) noUpdate() bool {
-	return uo.Error == nil && uo.Updated == false
+	return uo.Error == nil && !uo.Updated
 }
 
 func Init(info *cliutil.BuildInfo) {

@@ -153,7 +155,7 @@ func Update(c *cli.Context) error {
 		log.Info().Msg("cloudflared is set to update from staging")
 	}
 
-	isForced := c.Bool("force")
+	isForced := c.Bool(cfdflags.Force)
 	if isForced {
 		log.Info().Msg("cloudflared is set to upgrade to the latest publish version regardless of the current version")
 	}

@@ -166,7 +168,7 @@ func Update(c *cli.Context) error {
 		intendedVersion: c.String("version"),
 	})
 	if updateOutcome.Error != nil {
-		return &statusErr{updateOutcome.Error}
+		return &statusError{updateOutcome.Error}
 	}
 
 	if updateOutcome.noUpdate() {

@@ -252,7 +254,7 @@ func (a *AutoUpdater) Run(ctx context.Context) error {
 		pid, err := a.listeners.StartProcess()
 		if err != nil {
 			a.log.Err(err).Msg("Unable to restart server automatically")
-			return &statusErr{err: err}
+			return &statusError{err: err}
 		}
 		// stop old process after autoupdate. Otherwise we create a new process
 		// after each update
@@ -10,9 +10,9 @@ import (
 	"net/url"
 	"os"
 	"os/exec"
+	"path"
 	"path/filepath"
 	"runtime"
-	"strings"
 	"text/template"
 	"time"
 

@@ -134,7 +134,7 @@ func (v *WorkersVersion) Apply() error {
 
 	if err := os.Rename(newFilePath, v.targetPath); err != nil {
 		//attempt rollback
-		os.Rename(oldFilePath, v.targetPath)
+		_ = os.Rename(oldFilePath, v.targetPath)
 		return err
 	}
 	os.Remove(oldFilePath)

@@ -181,7 +181,7 @@ func download(url, filepath string, isCompressed bool) error {
 		tr := tar.NewReader(gr)
 
 		// advance the reader pass the header, which will be the single binary file
-		tr.Next()
+		_, _ = tr.Next()
 
 		r = tr
 	}

@@ -198,7 +198,7 @@ func download(url, filepath string, isCompressed bool) error {
 
 // isCompressedFile is a really simple file extension check to see if this is a macos tar and gzipped
 func isCompressedFile(urlstring string) bool {
-	if strings.HasSuffix(urlstring, ".tgz") {
+	if path.Ext(urlstring) == ".tgz" {
 		return true
 	}
 

@@ -206,7 +206,7 @@ func isCompressedFile(urlstring string) bool {
 	if err != nil {
 		return false
 	}
-	return strings.HasSuffix(u.Path, ".tgz")
+	return path.Ext(u.Path) == ".tgz"
 }
 
 // writeBatchFile writes a batch file out to disk
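Editor's note: for URLs, the raw-string check and the parsed-path check above behave differently once a query string is present, which is why the function tries both. A self-contained sketch of the combined check (example URLs are made up):

```go
package main

import (
	"fmt"
	"net/url"
	"path"
)

// isCompressedFile mirrors the updated logic: a ".tgz" extension on the raw
// string or on the parsed URL path marks a tar+gzip package.
func isCompressedFile(urlstring string) bool {
	if path.Ext(urlstring) == ".tgz" {
		return true
	}
	u, err := url.Parse(urlstring)
	if err != nil {
		return false
	}
	return path.Ext(u.Path) == ".tgz"
}

func main() {
	// The query string defeats the raw check ("?sig=abc" ends the string),
	// but u.Path still ends in ".tgz", so the second check catches it.
	fmt.Println(isCompressedFile("https://example.com/cloudflared-darwin-amd64.tgz?sig=abc")) // true
	fmt.Println(isCompressedFile("https://example.com/cloudflared-linux-amd64"))              // false
}
```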
@@ -249,7 +249,6 @@ func runWindowsBatch(batchFile string) error {
 		if exitError, ok := err.(*exec.ExitError); ok {
 			return fmt.Errorf("Error during update : %s;", string(exitError.Stderr))
 		}
-
 	}
 	return err
 }

@@ -26,7 +26,7 @@ import (
 const (
 	windowsServiceName        = "Cloudflared"
 	windowsServiceDescription = "Cloudflared agent"
-	windowsServiceUrl         = "https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/run-tunnel/as-a-service/windows/"
+	windowsServiceUrl         = "https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/configure-tunnels/local-management/as-a-service/windows/"
 
 	recoverActionDelay      = time.Second * 20
 	failureCountResetPeriod = time.Hour * 24
@@ -27,13 +27,19 @@ const (
 	MaxConcurrentStreams = math.MaxUint32
 
 	contentTypeHeader      = "content-type"
+	contentLengthHeader    = "content-length"
+	transferEncodingHeader = "transfer-encoding"
+
 	sseContentType  = "text/event-stream"
 	grpcContentType = "application/grpc"
+	sseJsonContentType = "application/x-ndjson"
+
+	chunkTransferEncoding = "chunked"
 )
 
 var (
 	switchingProtocolText = fmt.Sprintf("%d %s", http.StatusSwitchingProtocols, http.StatusText(http.StatusSwitchingProtocols))
-	flushableContentTypes = []string{sseContentType, grpcContentType}
+	flushableContentTypes = []string{sseContentType, grpcContentType, sseJsonContentType}
 )
 
 // TunnelConnection represents the connection to the edge.

@@ -60,6 +66,7 @@ type Credentials struct {
 	AccountTag   string
 	TunnelSecret []byte
 	TunnelID     uuid.UUID
+	Endpoint     string
 }
 
 func (c *Credentials) Auth() pogs.TunnelAuth {

@@ -74,13 +81,16 @@ type TunnelToken struct {
 	AccountTag   string    `json:"a"`
 	TunnelSecret []byte    `json:"s"`
 	TunnelID     uuid.UUID `json:"t"`
+	Endpoint     string    `json:"e,omitempty"`
 }
 
 func (t TunnelToken) Credentials() Credentials {
+	// nolint: gosimple
 	return Credentials{
 		AccountTag:   t.AccountTag,
 		TunnelSecret: t.TunnelSecret,
 		TunnelID:     t.TunnelID,
+		Endpoint:     t.Endpoint,
 	}
 }
 
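Editor's note: the new `"e,omitempty"` tag keeps old tokens parseable, since a missing `e` key simply leaves `Endpoint` empty. Tunnel tokens are assumed here to be base64-encoded JSON in this shape (CURRENT only shows `ParseToken` being called, not its body), so the sketch below demonstrates just the JSON layering and backward compatibility:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/google/uuid"
)

// TunnelToken copies the struct above; short keys keep the token compact.
type TunnelToken struct {
	AccountTag   string    `json:"a"`
	TunnelSecret []byte    `json:"s"`
	TunnelID     uuid.UUID `json:"t"`
	Endpoint     string    `json:"e,omitempty"`
}

func main() {
	// A token minted before the Endpoint field existed: no "e" key at all.
	legacy := []byte(`{"a":"acct","s":"c2VjcmV0LXNlY3JldC1zZWNyZXQtc2VjcmV0ISE=","t":"f47ac10b-58cc-4372-a567-0e02b2c3d479"}`)
	var tok TunnelToken
	if err := json.Unmarshal(legacy, &tok); err != nil {
		panic(err)
	}
	// omitempty means old tokens decode cleanly with an empty Endpoint.
	fmt.Println(tok.AccountTag, tok.Endpoint == "") // acct true
}
```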
@@ -270,6 +280,22 @@ type ConnectedFuse interface {
 // Helper method to let the caller know what content-types should require a flush on every
 // write to a ResponseWriter.
 func shouldFlush(headers http.Header) bool {
+	// When doing Server Side Events (SSE), some frameworks don't respect the `Content-Type` header.
+	// Therefore, we need to rely on other ways to know whether we should flush on write or not. A good
+	// approach is to assume that responses without `Content-Length` or with `Transfer-Encoding: chunked`
+	// are streams, and therefore, should be flushed right away to the eyeball.
+	// References:
+	// - https://datatracker.ietf.org/doc/html/rfc7230#section-4.1
+	// - https://datatracker.ietf.org/doc/html/rfc9112#section-6.1
+	if contentLength := headers.Get(contentLengthHeader); contentLength == "" {
+		return true
+	}
+	if transferEncoding := headers.Get(transferEncodingHeader); transferEncoding != "" {
+		transferEncoding = strings.ToLower(transferEncoding)
+		if strings.Contains(transferEncoding, chunkTransferEncoding) {
+			return true
+		}
+	}
 	if contentType := headers.Get(contentTypeHeader); contentType != "" {
 		contentType = strings.ToLower(contentType)
 		for _, c := range flushableContentTypes {

@@ -278,7 +304,6 @@ func shouldFlush(headers http.Header) bool {
 			}
 		}
 	}
-
 	return false
 }
 
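Editor's note: a standalone restatement of the heuristic above, runnable outside the `connection` package. The content-type comparison is written with `strings.HasPrefix` here as an assumption, since the loop body is cut off by the hunk boundary:

```go
package main

import (
	"fmt"
	"net/http"
	"strings"
)

// flushHeuristic is a local sketch of shouldFlush: no Content-Length or a
// chunked Transfer-Encoding marks a stream; otherwise fall back to the
// known streaming content types.
func flushHeuristic(h http.Header) bool {
	if h.Get("Content-Length") == "" {
		return true
	}
	if strings.Contains(strings.ToLower(h.Get("Transfer-Encoding")), "chunked") {
		return true
	}
	ct := strings.ToLower(h.Get("Content-Type"))
	for _, c := range []string{"text/event-stream", "application/grpc", "application/x-ndjson"} {
		if strings.HasPrefix(ct, c) {
			return true
		}
	}
	return false
}

func main() {
	h := http.Header{}
	h.Set("Content-Type", "application/json")
	h.Set("Content-Length", "42")
	fmt.Println(flushHeuristic(h)) // false: sized, non-streaming body
	h.Set("Transfer-Encoding", "chunked")
	fmt.Println(flushHeuristic(h)) // true: chunked implies streaming
}
```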
@@ -7,10 +7,12 @@ import (
 	"io"
 	"math/big"
 	"net/http"
+	"testing"
 	"time"
 
 	pkgerrors "github.com/pkg/errors"
 	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/require"
 
 	cfdflow "github.com/cloudflare/cloudflared/flow"
 

@@ -209,3 +211,48 @@ func (mcf mockConnectedFuse) Connected() {}
 func (mcf mockConnectedFuse) IsConnected() bool {
 	return true
 }
+
+func TestShouldFlushHeaders(t *testing.T) {
+	tests := []struct {
+		headers     map[string]string
+		shouldFlush bool
+	}{
+		{
+			headers:     map[string]string{contentTypeHeader: "application/json", contentLengthHeader: "1"},
+			shouldFlush: false,
+		},
+		{
+			headers:     map[string]string{contentTypeHeader: "text/html", contentLengthHeader: "1"},
+			shouldFlush: false,
+		},
+		{
+			headers:     map[string]string{contentTypeHeader: "text/event-stream", contentLengthHeader: "1"},
+			shouldFlush: true,
+		},
+		{
+			headers:     map[string]string{contentTypeHeader: "application/grpc", contentLengthHeader: "1"},
+			shouldFlush: true,
+		},
+		{
+			headers:     map[string]string{contentTypeHeader: "application/x-ndjson", contentLengthHeader: "1"},
+			shouldFlush: true,
+		},
+		{
+			headers:     map[string]string{contentTypeHeader: "application/json"},
+			shouldFlush: true,
+		},
+		{
+			headers:     map[string]string{contentTypeHeader: "application/json", contentLengthHeader: "-1", transferEncodingHeader: "chunked"},
+			shouldFlush: true,
+		},
+	}
+
+	for _, test := range tests {
+		headers := http.Header{}
+		for k, v := range test.headers {
+			headers.Add(k, v)
+		}
+
+		require.Equal(t, test.shouldFlush, shouldFlush(headers))
+	}
+}
@@ -10,7 +10,7 @@ import (
 
 	"github.com/cloudflare/cloudflared/management"
 	"github.com/cloudflare/cloudflared/tunnelrpc"
-	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
+	"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 )
 
 // registerClient derives a named tunnel rpc client that can then be used to register and unregister connections.

@@ -36,7 +36,7 @@ type controlStream struct {
 // ControlStreamHandler registers connections with origintunneld and initiates graceful shutdown.
 type ControlStreamHandler interface {
 	// ServeControlStream handles the control plane of the transport in the current goroutine calling this
-	ServeControlStream(ctx context.Context, rw io.ReadWriteCloser, connOptions *tunnelpogs.ConnectionOptions, tunnelConfigGetter TunnelConfigJSONGetter) error
+	ServeControlStream(ctx context.Context, rw io.ReadWriteCloser, connOptions *pogs.ConnectionOptions, tunnelConfigGetter TunnelConfigJSONGetter) error
 	// IsStopped tells whether the method above has finished
 	IsStopped() bool
 }

@@ -78,7 +78,7 @@ func NewControlStream(
 func (c *controlStream) ServeControlStream(
 	ctx context.Context,
 	rw io.ReadWriteCloser,
-	connOptions *tunnelpogs.ConnectionOptions,
+	connOptions *pogs.ConnectionOptions,
 	tunnelConfigGetter TunnelConfigJSONGetter,
 ) error {
 	registrationClient := c.registerClientFunc(ctx, rw, c.registerTimeout)

@@ -19,7 +19,7 @@ import (
 	cfdflow "github.com/cloudflare/cloudflared/flow"
 
 	"github.com/cloudflare/cloudflared/tracing"
-	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
+	"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 )
 
 // note: these constants are exported so we can reuse them in the edge-side code

@@ -39,7 +39,7 @@ type HTTP2Connection struct {
 	conn         net.Conn
 	server       *http2.Server
 	orchestrator Orchestrator
-	connOptions  *tunnelpogs.ConnectionOptions
+	connOptions  *pogs.ConnectionOptions
 	observer     *Observer
 	connIndex    uint8
 

@@ -54,7 +54,7 @@ type HTTP2Connection struct {
 func NewHTTP2Connection(
 	conn net.Conn,
 	orchestrator Orchestrator,
-	connOptions *tunnelpogs.ConnectionOptions,
+	connOptions *pogs.ConnectionOptions,
 	observer *Observer,
 	connIndex uint8,
 	controlStreamHandler ControlStreamHandler,
@@ -22,7 +22,6 @@ import (
 	cfdquic "github.com/cloudflare/cloudflared/quic"
 	"github.com/cloudflare/cloudflared/tracing"
 	"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
-	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 	rpcquic "github.com/cloudflare/cloudflared/tunnelrpc/quic"
 )
 

@@ -44,7 +43,7 @@ type quicConnection struct {
 	orchestrator         Orchestrator
 	datagramHandler      DatagramSessionHandler
 	controlStreamHandler ControlStreamHandler
-	connOptions          *tunnelpogs.ConnectionOptions
+	connOptions          *pogs.ConnectionOptions
 	connIndex            uint8
 
 	rpcTimeout time.Duration

@@ -235,7 +234,7 @@ func (q *quicConnection) dispatchRequest(ctx context.Context, stream *rpcquic.Re
 }
 
 // UpdateConfiguration is the RPC method invoked by edge when there is a new configuration
-func (q *quicConnection) UpdateConfiguration(ctx context.Context, version int32, config []byte) *tunnelpogs.UpdateConfigurationResponse {
+func (q *quicConnection) UpdateConfiguration(ctx context.Context, version int32, config []byte) *pogs.UpdateConfigurationResponse {
 	return q.orchestrator.UpdateConfig(version, config)
 }
 
@@ -2,11 +2,11 @@ package connection
 
 import (
 	"context"
-	"fmt"
 	"net"
 	"time"
 
 	"github.com/google/uuid"
+	"github.com/pkg/errors"
 	"github.com/quic-go/quic-go"
 	"github.com/rs/zerolog"
 

@@ -16,10 +16,17 @@ import (
 	"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 )
 
+var (
+	ErrUnsupportedRPCUDPRegistration   = errors.New("datagram v3 does not support RegisterUdpSession RPC")
+	ErrUnsupportedRPCUDPUnregistration = errors.New("datagram v3 does not support UnregisterUdpSession RPC")
+)
+
 type datagramV3Connection struct {
 	conn quic.Connection
+	index uint8
 	// datagramMuxer mux/demux datagrams from quic connection
 	datagramMuxer cfdquic.DatagramConn
+	metrics cfdquic.Metrics
 	logger *zerolog.Logger
 }
 

@@ -40,7 +47,9 @@ func NewDatagramV3Connection(ctx context.Context,
 
 	return &datagramV3Connection{
 		conn,
+		index,
 		datagramMuxer,
+		metrics,
 		logger,
 	}
 }

@@ -50,9 +59,11 @@ func (d *datagramV3Connection) Serve(ctx context.Context) error {
 }
 
 func (d *datagramV3Connection) RegisterUdpSession(ctx context.Context, sessionID uuid.UUID, dstIP net.IP, dstPort uint16, closeAfterIdleHint time.Duration, traceContext string) (*pogs.RegisterUdpSessionResponse, error) {
-	return nil, fmt.Errorf("datagram v3 does not support RegisterUdpSession RPC")
+	d.metrics.UnsupportedRemoteCommand(d.index, "register_udp_session")
+	return nil, ErrUnsupportedRPCUDPRegistration
}
 
 func (d *datagramV3Connection) UnregisterUdpSession(ctx context.Context, sessionID uuid.UUID, message string) error {
-	return fmt.Errorf("datagram v3 does not support UnregisterUdpSession RPC")
+	d.metrics.UnsupportedRemoteCommand(d.index, "unregister_udp_session")
+	return ErrUnsupportedRPCUDPUnregistration
 }
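Editor's note: swapping ad-hoc `fmt.Errorf` values for exported sentinels lets callers branch on the condition with `errors.Is` instead of string matching, even through wrapping. A small sketch (the sentinel value is copied from the diff; the caller is hypothetical):

```go
package main

import (
	"errors"
	"fmt"
)

// Mirrors the exported sentinel above.
var ErrUnsupportedRPCUDPRegistration = errors.New("datagram v3 does not support RegisterUdpSession RPC")

func registerUDPSession() error {
	// Even if an intermediate layer wraps the error, errors.Is still matches.
	return fmt.Errorf("control stream: %w", ErrUnsupportedRPCUDPRegistration)
}

func main() {
	err := registerUDPSession()
	fmt.Println(errors.Is(err, ErrUnsupportedRPCUDPRegistration)) // true
}
```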
@@ -9,6 +9,7 @@ import (
 
 const (
 	logFieldOriginCertPath = "originCertPath"
+	FedEndpoint            = "fed"
 )
 
 type User struct {

@@ -32,6 +33,10 @@ func (c User) CertPath() string {
 	return c.certPath
 }
 
+func (c User) IsFEDEndpoint() bool {
+	return c.cert.Endpoint == FedEndpoint
+}
+
 // Client uses the user credentials to create a Cloudflare API client
 func (c *User) Client(apiURL string, userAgent string, log *zerolog.Logger) (cfapi.Client, error) {
 	if apiURL == "" {

@@ -45,7 +50,6 @@ func (c *User) Client(apiURL string, userAgent string, log *zerolog.Logger) (cfa
 		userAgent,
 		log,
 	)
-
 	if err != nil {
 		return nil, err
 	}

@@ -3,7 +3,7 @@ package credentials
 import (
 	"io/fs"
 	"os"
-	"path"
+	"path/filepath"
 	"testing"
 
 	"github.com/stretchr/testify/require"

@@ -13,8 +13,8 @@ func TestCredentialsRead(t *testing.T) {
 	file, err := os.ReadFile("test-cloudflare-tunnel-cert-json.pem")
 	require.NoError(t, err)
 	dir := t.TempDir()
-	certPath := path.Join(dir, originCertFile)
-	os.WriteFile(certPath, file, fs.ModePerm)
+	certPath := filepath.Join(dir, originCertFile)
+	_ = os.WriteFile(certPath, file, fs.ModePerm)
 	user, err := Read(certPath, &nopLog)
 	require.NoError(t, err)
 	require.Equal(t, certPath, user.CertPath())
@@ -1,11 +1,13 @@
 package credentials
 
 import (
+	"bytes"
 	"encoding/json"
 	"encoding/pem"
 	"fmt"
 	"os"
 	"path/filepath"
+	"strings"
 
 	"github.com/mitchellh/go-homedir"
 	"github.com/rs/zerolog"

@@ -15,19 +17,30 @@ import (
 
 const (
 	DefaultCredentialFile = "cert.pem"
-	OriginCertFlag        = "origincert"
 )
 
-type namedTunnelToken struct {
+type OriginCert struct {
 	ZoneID    string `json:"zoneID"`
 	AccountID string `json:"accountID"`
 	APIToken  string `json:"apiToken"`
+	Endpoint  string `json:"endpoint,omitempty"`
 }
 
-type OriginCert struct {
-	ZoneID    string
-	APIToken  string
-	AccountID string
+func (oc *OriginCert) UnmarshalJSON(data []byte) error {
+	var aux struct {
+		ZoneID    string `json:"zoneID"`
+		AccountID string `json:"accountID"`
+		APIToken  string `json:"apiToken"`
+		Endpoint  string `json:"endpoint,omitempty"`
+	}
+	if err := json.Unmarshal(data, &aux); err != nil {
+		return fmt.Errorf("error parsing OriginCert: %v", err)
+	}
+	oc.ZoneID = aux.ZoneID
+	oc.AccountID = aux.AccountID
+	oc.APIToken = aux.APIToken
+	oc.Endpoint = strings.ToLower(aux.Endpoint)
+	return nil
 }
 
 // FindDefaultOriginCertPath returns the first path that contains a cert.pem file. If none of the
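Editor's note: the custom `UnmarshalJSON` above decodes into an auxiliary struct (avoiding infinite recursion into itself) and then lowercases the endpoint, so `"FED"`, `"Fed"`, and `"fed"` all compare equal to `FedEndpoint`. A minimal standalone version of that pattern:

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

// OriginCert here carries only the field needed for the demonstration.
type OriginCert struct {
	Endpoint string `json:"endpoint,omitempty"`
}

func (oc *OriginCert) UnmarshalJSON(data []byte) error {
	// Decoding into a plain aux struct sidesteps a recursive call back
	// into this method; normalization happens once, on the way in.
	var aux struct {
		Endpoint string `json:"endpoint,omitempty"`
	}
	if err := json.Unmarshal(data, &aux); err != nil {
		return err
	}
	oc.Endpoint = strings.ToLower(aux.Endpoint)
	return nil
}

func main() {
	var oc OriginCert
	_ = json.Unmarshal([]byte(`{"endpoint":"FED"}`), &oc)
	fmt.Println(oc.Endpoint == "fed") // true
}
```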
@@ -42,40 +55,56 @@ func FindDefaultOriginCertPath() string {
 	return ""
 }
 
+func DecodeOriginCert(blocks []byte) (*OriginCert, error) {
+	return decodeOriginCert(blocks)
+}
+
+func (cert *OriginCert) EncodeOriginCert() ([]byte, error) {
+	if cert == nil {
+		return nil, fmt.Errorf("originCert cannot be nil")
+	}
+	buffer, err := json.Marshal(cert)
+	if err != nil {
+		return nil, fmt.Errorf("originCert marshal failed: %v", err)
+	}
+	block := pem.Block{
+		Type:    "ARGO TUNNEL TOKEN",
+		Headers: map[string]string{},
+		Bytes:   buffer,
+	}
+	var out bytes.Buffer
+	err = pem.Encode(&out, &block)
+	if err != nil {
+		return nil, fmt.Errorf("pem encoding failed: %v", err)
+	}
+	return out.Bytes(), nil
+}
+
 func decodeOriginCert(blocks []byte) (*OriginCert, error) {
 	if len(blocks) == 0 {
-		return nil, fmt.Errorf("Cannot decode empty certificate")
+		return nil, fmt.Errorf("cannot decode empty certificate")
 	}
 	originCert := OriginCert{}
 	block, rest := pem.Decode(blocks)
-	for {
-		if block == nil {
-			break
-		}
+	for block != nil {
 		switch block.Type {
 		case "PRIVATE KEY", "CERTIFICATE":
 			// this is for legacy purposes.
-			break
 		case "ARGO TUNNEL TOKEN":
 			if originCert.ZoneID != "" || originCert.APIToken != "" {
-				return nil, fmt.Errorf("Found multiple tokens in the certificate")
+				return nil, fmt.Errorf("found multiple tokens in the certificate")
 			}
 			// The token is a string,
 			// Try the newer JSON format
-			ntt := namedTunnelToken{}
-			if err := json.Unmarshal(block.Bytes, &ntt); err == nil {
-				originCert.ZoneID = ntt.ZoneID
-				originCert.APIToken = ntt.APIToken
-				originCert.AccountID = ntt.AccountID
-			}
+			_ = json.Unmarshal(block.Bytes, &originCert)
 		default:
-			return nil, fmt.Errorf("Unknown block %s in the certificate", block.Type)
+			return nil, fmt.Errorf("unknown block %s in the certificate", block.Type)
 		}
 		block, rest = pem.Decode(rest)
 	}
 
 	if originCert.ZoneID == "" || originCert.APIToken == "" {
-		return nil, fmt.Errorf("Missing token in the certificate")
+		return nil, fmt.Errorf("missing token in the certificate")
 	}
 
 	return &originCert, nil
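Editor's note: `EncodeOriginCert`/`decodeOriginCert` above are a JSON-in-PEM round trip: marshal the struct, wrap it in an `ARGO TUNNEL TOKEN` block, and reverse the layers on read. A standalone sketch of the same shape, using only the standard library (field values are made up):

```go
package main

import (
	"bytes"
	"encoding/json"
	"encoding/pem"
	"fmt"
)

func main() {
	// Encode: JSON payload wrapped in an "ARGO TUNNEL TOKEN" PEM block.
	payload, _ := json.Marshal(map[string]string{"zoneID": "zone", "apiToken": "token"})
	var out bytes.Buffer
	_ = pem.Encode(&out, &pem.Block{Type: "ARGO TUNNEL TOKEN", Bytes: payload})

	// Decode: peel the PEM layer, then unmarshal the JSON body.
	block, _ := pem.Decode(out.Bytes())
	var decoded struct {
		ZoneID   string `json:"zoneID"`
		APIToken string `json:"apiToken"`
	}
	_ = json.Unmarshal(block.Bytes, &decoded)
	fmt.Println(decoded.ZoneID, decoded.APIToken) // zone token
}
```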
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"io/fs"
 	"os"
-	"path"
+	"path/filepath"
 	"testing"
 
 	"github.com/rs/zerolog"

@@ -16,27 +16,25 @@ const (
 	originCertFile = "cert.pem"
 )
 
-var (
-	nopLog = zerolog.Nop().With().Logger()
-)
+var nopLog = zerolog.Nop().With().Logger()
 
 func TestLoadOriginCert(t *testing.T) {
 	cert, err := decodeOriginCert([]byte{})
-	assert.Equal(t, fmt.Errorf("Cannot decode empty certificate"), err)
+	assert.Equal(t, fmt.Errorf("cannot decode empty certificate"), err)
 	assert.Nil(t, cert)
 
 	blocks, err := os.ReadFile("test-cert-unknown-block.pem")
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	cert, err = decodeOriginCert(blocks)
-	assert.Equal(t, fmt.Errorf("Unknown block RSA PRIVATE KEY in the certificate"), err)
+	assert.Equal(t, fmt.Errorf("unknown block RSA PRIVATE KEY in the certificate"), err)
 	assert.Nil(t, cert)
 }
 
 func TestJSONArgoTunnelTokenEmpty(t *testing.T) {
 	blocks, err := os.ReadFile("test-cert-no-token.pem")
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	cert, err := decodeOriginCert(blocks)
-	assert.Equal(t, fmt.Errorf("Missing token in the certificate"), err)
+	assert.Equal(t, fmt.Errorf("missing token in the certificate"), err)
 	assert.Nil(t, cert)
 }
 

@@ -52,51 +50,21 @@ func TestJSONArgoTunnelToken(t *testing.T) {
 
 func CloudflareTunnelTokenTest(t *testing.T, path string) {
 	blocks, err := os.ReadFile(path)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	cert, err := decodeOriginCert(blocks)
-	assert.NoError(t, err)
+	require.NoError(t, err)
 	assert.NotNil(t, cert)
 	assert.Equal(t, "7b0a4d77dfb881c1a3b7d61ea9443e19", cert.ZoneID)
 	key := "test-service-key"
 	assert.Equal(t, key, cert.APIToken)
 }
 
-type mockFile struct {
-	path string
-	data []byte
-	err  error
-}
-
-type mockFileSystem struct {
-	files map[string]mockFile
-}
-
-func newMockFileSystem(files ...mockFile) *mockFileSystem {
-	fs := mockFileSystem{map[string]mockFile{}}
-	for _, f := range files {
-		fs.files[f.path] = f
-	}
-	return &fs
-}
-
-func (fs *mockFileSystem) ReadFile(path string) ([]byte, error) {
-	if f, ok := fs.files[path]; ok {
-		return f.data, f.err
-	}
-	return nil, os.ErrNotExist
-}
-
-func (fs *mockFileSystem) ValidFilePath(path string) bool {
-	_, exists := fs.files[path]
-	return exists
-}
-
 func TestFindOriginCert_Valid(t *testing.T) {
 	file, err := os.ReadFile("test-cloudflare-tunnel-cert-json.pem")
 	require.NoError(t, err)
 	dir := t.TempDir()
-	certPath := path.Join(dir, originCertFile)
-	os.WriteFile(certPath, file, fs.ModePerm)
+	certPath := filepath.Join(dir, originCertFile)
+	_ = os.WriteFile(certPath, file, fs.ModePerm)
 	path, err := FindOriginCert(certPath, &nopLog)
 	require.NoError(t, err)
 	require.Equal(t, certPath, path)

@@ -104,7 +72,32 @@ func TestFindOriginCert_Valid(t *testing.T) {
 
 func TestFindOriginCert_Missing(t *testing.T) {
 	dir := t.TempDir()
-	certPath := path.Join(dir, originCertFile)
+	certPath := filepath.Join(dir, originCertFile)
 	_, err := FindOriginCert(certPath, &nopLog)
 	require.Error(t, err)
 }
+
+func TestEncodeDecodeOriginCert(t *testing.T) {
+	cert := OriginCert{
+		ZoneID:    "zone",
+		AccountID: "account",
+		APIToken:  "token",
+		Endpoint:  "FED",
+	}
+	blocks, err := cert.EncodeOriginCert()
+	require.NoError(t, err)
+	decodedCert, err := DecodeOriginCert(blocks)
+	require.NoError(t, err)
+	assert.NotNil(t, cert)
+	assert.Equal(t, "zone", decodedCert.ZoneID)
+	assert.Equal(t, "account", decodedCert.AccountID)
+	assert.Equal(t, "token", decodedCert.APIToken)
+	assert.Equal(t, FedEndpoint, decodedCert.Endpoint)
+}
+
+func TestEncodeDecodeNilOriginCert(t *testing.T) {
+	var cert *OriginCert
+	blocks, err := cert.EncodeOriginCert()
+	assert.Equal(t, fmt.Errorf("originCert cannot be nil"), err)
+	require.Nil(t, blocks)
+}
@ -87,3 +87,4 @@ M2i4QoOFcSKIG+v4SuvgEJHgG8vGvxh2qlSxnMWuPV+7/1P5ATLqDj1PlKms+BNR
|
||||||
y7sc5AT9PclkL3Y9MNzOu0LXyBkGYcl8M0EQfLv9VPbWT+NXiMg/O2CHiT02pAAz
|
y7sc5AT9PclkL3Y9MNzOu0LXyBkGYcl8M0EQfLv9VPbWT+NXiMg/O2CHiT02pAAz
|
||||||
uQicoQq3yzeQh20wtrtaXzTNmA==
|
uQicoQq3yzeQh20wtrtaXzTNmA==
|
||||||
-----END RSA PRIVATE KEY-----
|
-----END RSA PRIVATE KEY-----
|
||||||
|
|
||||||
|
|
|
@@ -84,7 +84,7 @@ func (s *Session) waitForCloseCondition(ctx context.Context, closeAfterIdle time.Duration)
 	// Closing dstConn cancels read so dstToTransport routine in Serve() can return
 	defer s.dstConn.Close()
 	if closeAfterIdle == 0 {
-		// provide deafult is caller doesn't specify one
+		// provide default if caller doesn't specify one
 		closeAfterIdle = defaultCloseIdleAfter
 	}
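Besides the comment fix, the hunk above shows the zero-value-default pattern for `time.Duration` parameters. A standalone sketch of the same pattern follows; the constant's value here is an assumption for illustration, not taken from the diff.

```go
package main

import (
	"fmt"
	"time"
)

// Assumed value for this sketch; the real default lives in the session package.
const defaultCloseIdleAfter = 110 * time.Second

// resolveIdleTimeout mirrors the pattern in the hunk above: a zero
// duration from the caller falls back to a package default.
func resolveIdleTimeout(closeAfterIdle time.Duration) time.Duration {
	if closeAfterIdle == 0 {
		return defaultCloseIdleAfter
	}
	return closeAfterIdle
}

func main() {
	fmt.Println(resolveIdleTimeout(0))               // 1m50s (the default)
	fmt.Println(resolveIdleTimeout(5 * time.Second)) // 5s
}
```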
@@ -12,6 +12,7 @@ import (
 	"github.com/google/uuid"
 	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 	"golang.org/x/sync/errgroup"

@@ -54,22 +55,22 @@ func testSessionReturns(t *testing.T, closeBy closeMethod, closeAfterIdle time.Duration) {
 		closedByRemote, err := session.Serve(ctx, closeAfterIdle)
 		switch closeBy {
 		case closeByContext:
-			require.Equal(t, context.Canceled, err)
-			require.False(t, closedByRemote)
+			assert.Equal(t, context.Canceled, err)
+			assert.False(t, closedByRemote)
 		case closeByCallingClose:
-			require.Equal(t, localCloseReason, err)
-			require.Equal(t, localCloseReason.byRemote, closedByRemote)
+			assert.Equal(t, localCloseReason, err)
+			assert.Equal(t, localCloseReason.byRemote, closedByRemote)
 		case closeByTimeout:
-			require.Equal(t, SessionIdleErr(closeAfterIdle), err)
-			require.False(t, closedByRemote)
+			assert.Equal(t, SessionIdleErr(closeAfterIdle), err)
+			assert.False(t, closedByRemote)
 		}
 		close(sessionDone)
 	}()

 	go func() {
 		n, err := session.transportToDst(payload)
-		require.NoError(t, err)
-		require.Equal(t, len(payload), n)
+		assert.NoError(t, err)
+		assert.Equal(t, len(payload), n)
 	}()

 	readBuffer := make([]byte, len(payload)+1)

@@ -84,6 +85,8 @@ func testSessionReturns(t *testing.T, closeBy closeMethod, closeAfterIdle time.Duration) {
 		cancel()
 	case closeByCallingClose:
 		session.close(localCloseReason)
+	default:
+		// ignore
 	}

 	<-sessionDone

@@ -128,7 +131,7 @@ func testActiveSessionNotClosed(t *testing.T, readFromDst bool, writeToDst bool) {
 	ctx, cancel := context.WithCancel(context.Background())
 	errGroup, ctx := errgroup.WithContext(ctx)
 	errGroup.Go(func() error {
-		session.Serve(ctx, closeAfterIdle)
+		_, _ = session.Serve(ctx, closeAfterIdle)
 		if time.Now().Before(startTime.Add(activeTime)) {
 			return fmt.Errorf("session closed while it's still active")
 		}
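The switch from `require` to `assert` inside the spawned goroutines above is deliberate: `require` failures call `t.FailNow`, which only works from the goroutine running the test, while `assert` merely records the failure and is safe from any goroutine. A minimal sketch of the safe pattern; `doWork` is a hypothetical stand-in for `session.transportToDst`.

```go
package main

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// doWork is a stand-in for work done off the test goroutine.
func doWork() error { return nil }

func TestWorkerGoroutine(t *testing.T) {
	done := make(chan struct{})
	go func() {
		defer close(done)
		// assert, not require: FailNow must not run off the test goroutine.
		assert.NoError(t, doWork())
	}()
	<-done
}
```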
@@ -9,7 +9,7 @@ import (
 	"net/url"
 	"strconv"

-	"github.com/cloudflare/cloudflared/logger"
+	cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
 )

 type httpClient struct {

@@ -86,12 +86,12 @@ func (client *httpClient) GetLogConfiguration(ctx context.Context) (*LogConfiguration, error) {
 		return nil, fmt.Errorf("error convertin pid to int: %w", err)
 	}

-	logFile, exists := data[logger.LogFileFlag]
+	logFile, exists := data[cfdflags.LogFile]
 	if exists {
 		return &LogConfiguration{logFile, "", uid}, nil
 	}

-	logDirectory, exists := data[logger.LogDirectoryFlag]
+	logDirectory, exists := data[cfdflags.LogDirectory]
 	if exists {
 		return &LogConfiguration{"", logDirectory, uid}, nil
 	}
@@ -1,5 +1,7 @@
 package features

+import "slices"
+
 const (
 	FeatureSerializedHeaders = "serialized_headers"
 	FeatureQuickReconnects   = "quick_reconnects"

@@ -8,7 +10,9 @@ const (
 	FeaturePostQuantum    = "postquantum"
 	FeatureQUICSupportEOF = "support_quic_eof"
 	FeatureManagementLogs = "management_logs"
-	FeatureDatagramV3     = "support_datagram_v3"
+	FeatureDatagramV3_1   = "support_datagram_v3_1"
+
+	DeprecatedFeatureDatagramV3 = "support_datagram_v3" // Deprecated: TUN-9291
 )

 var defaultFeatures = []string{

@@ -19,6 +23,11 @@ var defaultFeatures = []string{
 	FeatureManagementLogs,
 }

+// List of features that are no longer in-use.
+var deprecatedFeatures = []string{
+	DeprecatedFeatureDatagramV3,
+}
+
 // Features set by user provided flags
 type staticFeatures struct {
 	PostQuantumMode *PostQuantumMode

@@ -40,15 +49,19 @@ const (
 	// DatagramV2 is the currently supported datagram protocol for UDP and ICMP packets
 	DatagramV2 DatagramVersion = FeatureDatagramV2
 	// DatagramV3 is a new datagram protocol for UDP and ICMP packets. It is not backwards compatible with datagram v2.
-	DatagramV3 DatagramVersion = FeatureDatagramV3
+	DatagramV3 DatagramVersion = FeatureDatagramV3_1
 )

-// Remove any duplicates from the slice
-func Dedup(slice []string) []string {
+// Remove any duplicate features from the list and remove deprecated features
+func dedupAndRemoveFeatures(features []string) []string {
 	// Convert the slice into a set
-	set := make(map[string]bool, 0)
-	for _, str := range slice {
-		set[str] = true
+	set := map[string]bool{}
+	for _, feature := range features {
+		// Remove deprecated features from the provided list
+		if slices.Contains(deprecatedFeatures, feature) {
+			continue
+		}
+		set[feature] = true
 	}

 	// Convert the set back into a slice
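For reference, here is a self-contained restatement of the renamed helper with a tiny usage example; `deprecated` mirrors the `deprecatedFeatures` list above, and output order is unspecified because the values round-trip through a map.

```go
package main

import (
	"fmt"
	"slices"
)

// Mirrors the deprecatedFeatures list in the hunk above.
var deprecated = []string{"support_datagram_v3"}

// dedupAndRemove restates the helper from the diff: drop deprecated
// entries, then de-duplicate the rest via a set.
func dedupAndRemove(features []string) []string {
	set := map[string]bool{}
	for _, f := range features {
		if slices.Contains(deprecated, f) {
			continue
		}
		set[f] = true
	}
	out := make([]string, 0, len(set))
	for f := range set {
		out = append(out, f)
	}
	return out
}

func main() {
	in := []string{"management_logs", "management_logs", "support_datagram_v3"}
	fmt.Println(dedupAndRemove(in)) // [management_logs] (order not guaranteed)
}
```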
@@ -7,7 +7,6 @@ import (
 	"hash/fnv"
 	"net"
 	"slices"
-	"sync"
 	"time"

 	"github.com/rs/zerolog"

@@ -15,7 +14,6 @@ import (

 const (
 	featureSelectorHostname = "cfd-features.argotunnel.com"
-	defaultRefreshFreq      = time.Hour * 6
 	lookupTimeout           = time.Second * 10
 )

@@ -23,32 +21,27 @@ const (
 // If the TXT record is missing a key, the field will unmarshal to the default Go value

 type featuresRecord struct {
-	// support_datagram_v3
-	DatagramV3Percentage int32 `json:"dv3"`
+	// DatagramV3Percentage int32 `json:"dv3"` // Removed in TUN-9291

 	// PostQuantumPercentage int32 `json:"pq"` // Removed in TUN-7970
 }

 func NewFeatureSelector(ctx context.Context, accountTag string, cliFeatures []string, pq bool, logger *zerolog.Logger) (*FeatureSelector, error) {
-	return newFeatureSelector(ctx, accountTag, logger, newDNSResolver(), cliFeatures, pq, defaultRefreshFreq)
+	return newFeatureSelector(ctx, accountTag, logger, newDNSResolver(), cliFeatures, pq)
 }

-// FeatureSelector determines if this account will try new features. It periodically queries a DNS TXT record
-// to see which features are turned on.
+// FeatureSelector determines if this account will try new features; loaded once during startup.
 type FeatureSelector struct {
-	accountHash int32
+	accountHash uint32
 	logger      *zerolog.Logger
 	resolver    resolver

 	staticFeatures staticFeatures
 	cliFeatures    []string

-	// lock protects concurrent access to dynamic features
-	lock     sync.RWMutex
 	features featuresRecord
 }

-func newFeatureSelector(ctx context.Context, accountTag string, logger *zerolog.Logger, resolver resolver, cliFeatures []string, pq bool, refreshFreq time.Duration) (*FeatureSelector, error) {
+func newFeatureSelector(ctx context.Context, accountTag string, logger *zerolog.Logger, resolver resolver, cliFeatures []string, pq bool) (*FeatureSelector, error) {
 	// Combine default features and user-provided features
 	var pqMode *PostQuantumMode
 	if pq {

@@ -64,22 +57,16 @@ func newFeatureSelector(ctx context.Context, accountTag string, logger *zerolog.Logger, resolver resolver, cliFeatures []string, pq bool) (*FeatureSelector, error) {
 		logger:         logger,
 		resolver:       resolver,
 		staticFeatures: staticFeatures,
-		cliFeatures:    Dedup(cliFeatures),
+		cliFeatures:    dedupAndRemoveFeatures(cliFeatures),
 	}

-	if err := selector.refresh(ctx); err != nil {
+	if err := selector.init(ctx); err != nil {
 		logger.Err(err).Msg("Failed to fetch features, default to disable")
 	}

-	go selector.refreshLoop(ctx, refreshFreq)
-
 	return selector, nil
 }

-func (fs *FeatureSelector) accountEnabled(percentage int32) bool {
-	return percentage > fs.accountHash
-}
-
 func (fs *FeatureSelector) PostQuantumMode() PostQuantumMode {
 	if fs.staticFeatures.PostQuantumMode != nil {
 		return *fs.staticFeatures.PostQuantumMode

@@ -89,11 +76,8 @@ func (fs *FeatureSelector) PostQuantumMode() PostQuantumMode {
 }

 func (fs *FeatureSelector) DatagramVersion() DatagramVersion {
-	fs.lock.RLock()
-	defer fs.lock.RUnlock()
-
 	// If user provides the feature via the cli, we take it as priority over remote feature evaluation
-	if slices.Contains(fs.cliFeatures, FeatureDatagramV3) {
+	if slices.Contains(fs.cliFeatures, FeatureDatagramV3_1) {
 		return DatagramV3
 	}
 	// If the user specifies DatagramV2, we also take that over remote

@@ -101,36 +85,16 @@ func (fs *FeatureSelector) DatagramVersion() DatagramVersion {
 		return DatagramV2
 	}

-	if fs.accountEnabled(fs.features.DatagramV3Percentage) {
-		return DatagramV3
-	}
 	return DatagramV2
 }

 // ClientFeatures will return the list of currently available features that cloudflared should provide to the edge.
-//
-// This list is dynamic and can change in-between returns.
 func (fs *FeatureSelector) ClientFeatures() []string {
 	// Evaluate any remote features along with static feature list to construct the list of features
-	return Dedup(slices.Concat(defaultFeatures, fs.cliFeatures, []string{string(fs.DatagramVersion())}))
+	return dedupAndRemoveFeatures(slices.Concat(defaultFeatures, fs.cliFeatures, []string{string(fs.DatagramVersion())}))
 }

-func (fs *FeatureSelector) refreshLoop(ctx context.Context, refreshFreq time.Duration) {
-	ticker := time.NewTicker(refreshFreq)
-	for {
-		select {
-		case <-ctx.Done():
-			return
-		case <-ticker.C:
-			err := fs.refresh(ctx)
-			if err != nil {
-				fs.logger.Err(err).Msg("Failed to refresh feature selector")
-			}
-		}
-	}
-}
-
-func (fs *FeatureSelector) refresh(ctx context.Context) error {
+func (fs *FeatureSelector) init(ctx context.Context) error {
 	record, err := fs.resolver.lookupRecord(ctx)
 	if err != nil {
 		return err

@@ -141,9 +105,6 @@ func (fs *FeatureSelector) refresh(ctx context.Context) error {
 		return err
 	}

-	fs.lock.Lock()
-	defer fs.lock.Unlock()
-
 	fs.features = features

 	return nil

@@ -180,8 +141,8 @@ func (dr *dnsResolver) lookupRecord(ctx context.Context) ([]byte, error) {
 	return []byte(records[0]), nil
 }

-func switchThreshold(accountTag string) int32 {
+func switchThreshold(accountTag string) uint32 {
 	h := fnv.New32a()
 	_, _ = h.Write([]byte(accountTag))
-	return int32(h.Sum32() % 100)
+	return h.Sum32() % 100
 }
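The `switchThreshold` change above drops the `int32` cast: since `h.Sum32() % 100` is already in `[0, 100)`, returning `uint32` directly is both simpler and free of a lossy conversion. A runnable restatement of the function as it appears after the change, for illustration:

```go
package main

import (
	"fmt"
	"hash/fnv"
)

// switchThreshold, as in the hunk above: FNV-1a of the account tag,
// reduced to a stable 0-99 bucket. An account's bucket is deterministic,
// so it always lands on the same side of a rollout percentage.
func switchThreshold(accountTag string) uint32 {
	h := fnv.New32a()
	_, _ = h.Write([]byte(accountTag))
	return h.Sum32() % 100
}

func main() {
	// Example account tag; any string input yields a fixed bucket.
	fmt.Println(switchThreshold("example-account-tag"))
}
```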
@@ -3,9 +3,7 @@ package features
 import (
 	"context"
 	"encoding/json"
-	"fmt"
 	"testing"
-	"time"

 	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/require"

@@ -14,33 +12,23 @@ import (
 func TestUnmarshalFeaturesRecord(t *testing.T) {
 	tests := []struct {
 		record             []byte
-		expectedPercentage int32
+		expectedPercentage uint32
 	}{
-		{
-			record:             []byte(`{"dv3":0}`),
-			expectedPercentage: 0,
-		},
-		{
-			record:             []byte(`{"dv3":39}`),
-			expectedPercentage: 39,
-		},
-		{
-			record:             []byte(`{"dv3":100}`),
-			expectedPercentage: 100,
-		},
 		{
 			record: []byte(`{}`), // Unmarshal to default struct if key is not present
 		},
 		{
 			record: []byte(`{"kyber":768}`), // Unmarshal to default struct if key is not present
 		},
+		{
+			record: []byte(`{"pq": 101,"dv3":100}`), // Expired keys don't unmarshal to anything
+		},
 	}

 	for _, test := range tests {
 		var features featuresRecord
 		err := json.Unmarshal(test.record, &features)
 		require.NoError(t, err)
-		require.Equal(t, test.expectedPercentage, features.DatagramV3Percentage, test)
 	}
 }

@@ -61,7 +49,7 @@ func TestFeaturePrecedenceEvaluationPostQuantum(t *testing.T) {
 		{
 			name:             "user_specified",
 			cli:              true,
-			expectedFeatures: Dedup(append(defaultFeatures, FeaturePostQuantum)),
+			expectedFeatures: dedupAndRemoveFeatures(append(defaultFeatures, FeaturePostQuantum)),
 			expectedVersion:  PostQuantumStrict,
 		},
 	}

@@ -69,7 +57,7 @@ func TestFeaturePrecedenceEvaluationPostQuantum(t *testing.T) {
 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			resolver := &staticResolver{record: featuresRecord{}}
-			selector, err := newFeatureSelector(context.Background(), test.name, &logger, resolver, []string{}, test.cli, time.Second)
+			selector, err := newFeatureSelector(context.Background(), test.name, &logger, resolver, []string{}, test.cli)
 			require.NoError(t, err)
 			require.ElementsMatch(t, test.expectedFeatures, selector.ClientFeatures())
 			require.Equal(t, test.expectedVersion, selector.PostQuantumMode())

@@ -102,44 +90,17 @@ func TestFeaturePrecedenceEvaluationDatagramVersion(t *testing.T) {
 		},
 		{
 			name:             "user_specified_v3",
-			cli:              []string{FeatureDatagramV3},
+			cli:              []string{FeatureDatagramV3_1},
 			remote:           featuresRecord{},
-			expectedFeatures: Dedup(append(defaultFeatures, FeatureDatagramV3)),
-			expectedVersion:  FeatureDatagramV3,
+			expectedFeatures: dedupAndRemoveFeatures(append(defaultFeatures, FeatureDatagramV3_1)),
+			expectedVersion:  FeatureDatagramV3_1,
 		},
-		{
-			name: "remote_specified_v3",
-			cli:  []string{},
-			remote: featuresRecord{
-				DatagramV3Percentage: 100,
-			},
-			expectedFeatures: Dedup(append(defaultFeatures, FeatureDatagramV3)),
-			expectedVersion:  FeatureDatagramV3,
-		},
-		{
-			name: "remote_and_user_specified_v3",
-			cli:  []string{FeatureDatagramV3},
-			remote: featuresRecord{
-				DatagramV3Percentage: 100,
-			},
-			expectedFeatures: Dedup(append(defaultFeatures, FeatureDatagramV3)),
-			expectedVersion:  FeatureDatagramV3,
-		},
-		{
-			name: "remote_v3_and_user_specified_v2",
-			cli:  []string{FeatureDatagramV2},
-			remote: featuresRecord{
-				DatagramV3Percentage: 100,
-			},
-			expectedFeatures: defaultFeatures,
-			expectedVersion:  DatagramV2,
-		},
 	}

 	for _, test := range tests {
 		t.Run(test.name, func(t *testing.T) {
 			resolver := &staticResolver{record: test.remote}
-			selector, err := newFeatureSelector(context.Background(), test.name, &logger, resolver, test.cli, false, time.Second)
+			selector, err := newFeatureSelector(context.Background(), test.name, &logger, resolver, test.cli, false)
 			require.NoError(t, err)
 			require.ElementsMatch(t, test.expectedFeatures, selector.ClientFeatures())
 			require.Equal(t, test.expectedVersion, selector.DatagramVersion())

@@ -147,75 +108,59 @@ func TestFeaturePrecedenceEvaluationDatagramVersion(t *testing.T) {
 	}
 }

-func TestRefreshFeaturesRecord(t *testing.T) {
-	// The hash of the accountTag is 82
-	accountTag := t.Name()
-	threshold := switchThreshold(accountTag)
-
-	percentages := []int32{0, 10, 81, 82, 83, 100, 101, 1000}
-	refreshFreq := time.Millisecond * 10
-	selector := newTestSelector(t, percentages, false, refreshFreq)
-
-	// Starting out should default to DatagramV2
-	require.Equal(t, DatagramV2, selector.DatagramVersion())
-
-	for _, percentage := range percentages {
-		if percentage > threshold {
-			require.Equal(t, DatagramV3, selector.DatagramVersion())
-		} else {
-			require.Equal(t, DatagramV2, selector.DatagramVersion())
-		}
-
-		time.Sleep(refreshFreq + time.Millisecond)
-	}
-
-	// Make sure error doesn't override the last fetched features
-	require.Equal(t, DatagramV3, selector.DatagramVersion())
-}
+func TestDeprecatedFeaturesRemoved(t *testing.T) {
+	logger := zerolog.Nop()
+	tests := []struct {
+		name             string
+		cli              []string
+		remote           featuresRecord
+		expectedFeatures []string
+	}{
+		{
+			name:             "no_removals",
+			cli:              []string{},
+			remote:           featuresRecord{},
+			expectedFeatures: defaultFeatures,
+		},
+		{
+			name:             "support_datagram_v3",
+			cli:              []string{DeprecatedFeatureDatagramV3},
+			remote:           featuresRecord{},
+			expectedFeatures: defaultFeatures,
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			resolver := &staticResolver{record: test.remote}
+			selector, err := newFeatureSelector(context.Background(), test.name, &logger, resolver, test.cli, false)
+			require.NoError(t, err)
+			require.ElementsMatch(t, test.expectedFeatures, selector.ClientFeatures())
+		})
+	}
+}

 func TestStaticFeatures(t *testing.T) {
-	percentages := []int32{0}
+	percentages := []uint32{0}
 	// PostQuantum Enabled from user flag
-	selector := newTestSelector(t, percentages, true, time.Millisecond*10)
+	selector := newTestSelector(t, percentages, true)
 	require.Equal(t, PostQuantumStrict, selector.PostQuantumMode())

 	// PostQuantum Disabled (or not set)
-	selector = newTestSelector(t, percentages, false, time.Millisecond*10)
+	selector = newTestSelector(t, percentages, false)
 	require.Equal(t, PostQuantumPrefer, selector.PostQuantumMode())
 }

-func newTestSelector(t *testing.T, percentages []int32, pq bool, refreshFreq time.Duration) *FeatureSelector {
+func newTestSelector(t *testing.T, percentages []uint32, pq bool) *FeatureSelector {
 	accountTag := t.Name()
 	logger := zerolog.Nop()

-	resolver := &mockResolver{
-		percentages: percentages,
-	}
-
-	selector, err := newFeatureSelector(context.Background(), accountTag, &logger, resolver, []string{}, pq, refreshFreq)
+	selector, err := newFeatureSelector(context.Background(), accountTag, &logger, &staticResolver{}, []string{}, pq)
 	require.NoError(t, err)

 	return selector
 }

-type mockResolver struct {
-	nextIndex   int
-	percentages []int32
-}
-
-func (mr *mockResolver) lookupRecord(ctx context.Context) ([]byte, error) {
-	if mr.nextIndex >= len(mr.percentages) {
-		return nil, fmt.Errorf("no more record to lookup")
-	}
-
-	record, err := json.Marshal(featuresRecord{
-		DatagramV3Percentage: mr.percentages[mr.nextIndex],
-	})
-	mr.nextIndex++
-
-	return record, err
-}
-
 type staticResolver struct {
 	record featuresRecord
 }
@@ -61,7 +61,7 @@ def assert_tag_exists(repo, version):
     raise Exception("Tag {} not found".format(version))


-def get_or_create_release(repo, version, dry_run=False):
+def get_or_create_release(repo, version, dry_run=False, is_draft=False):
    """
    Get a Github Release matching the version tag or create a new one.
    If a conflict occurs on creation, attempt to fetch the Release on last time

@@ -81,8 +81,11 @@ def get_or_create_release(repo, version, dry_run=False):
        return

    try:
+        if is_draft:
+            logging.info("Drafting release %s", version)
+        else:
-        logging.info("Creating release %s", version)
+            logging.info("Creating release %s", version)
-        return repo.create_git_release(version, version, "")
+        return repo.create_git_release(version, version, "", is_draft)
    except GithubException as e:
        errors = e.data.get("errors", [])
        if e.status == 422 and any(

@@ -129,6 +132,10 @@ def parse_args():
        "--dry-run", action="store_true", help="Do not create release or upload asset"
    )

+    parser.add_argument(
+        "--draft", action="store_true", help="Create a draft release"
+    )
+
    args = parser.parse_args()
    is_valid = True
    if not args.release_version:

@@ -292,7 +299,7 @@ def main():
    for filename in onlyfiles:
        binary_path = os.path.join(args.path, filename)
        assert_asset_version(binary_path, args.release_version)
-    release = get_or_create_release(repo, args.release_version, args.dry_run)
+    release = get_or_create_release(repo, args.release_version, args.dry_run, args.draft)
    for filename in onlyfiles:
        binary_path = os.path.join(args.path, filename)
        upload_asset(release, binary_path, filename, args.release_version, args.kv_account_id, args.namespace_id,
@@ -4,7 +4,6 @@ import (
 	"fmt"
 	"io"
 	"os"
-	"path"
 	"path/filepath"
 	"sync"
 	"time"

@@ -16,6 +15,7 @@ import (
 	"golang.org/x/term"
 	"gopkg.in/natefinch/lumberjack.v2"

+	cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
 	"github.com/cloudflare/cloudflared/management"
 )

@@ -23,14 +23,6 @@ const (
 	EnableTerminalLog  = false
 	DisableTerminalLog = true

-	LogLevelFlag          = "loglevel"
-	LogFileFlag           = "logfile"
-	LogDirectoryFlag      = "log-directory"
-	LogTransportLevelFlag = "transport-loglevel"
-
-	LogSSHDirectoryFlag = "log-directory"
-	LogSSHLevelFlag     = "log-level"
-
 	dirPermMode  = 0744 // rwxr--r--
 	filePermMode = 0644 // rw-r--r--

@@ -137,15 +129,15 @@ func newZerolog(loggerConfig *Config) *zerolog.Logger {
 }

 func CreateTransportLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger {
-	return createFromContext(c, LogTransportLevelFlag, LogDirectoryFlag, disableTerminal)
+	return createFromContext(c, cfdflags.TransportLogLevel, cfdflags.LogDirectory, disableTerminal)
 }

 func CreateLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger {
-	return createFromContext(c, LogLevelFlag, LogDirectoryFlag, disableTerminal)
+	return createFromContext(c, cfdflags.LogLevel, cfdflags.LogDirectory, disableTerminal)
 }

 func CreateSSHLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger {
-	return createFromContext(c, LogSSHLevelFlag, LogSSHDirectoryFlag, disableTerminal)
+	return createFromContext(c, cfdflags.LogLevelSSH, cfdflags.LogDirectory, disableTerminal)
 }

 func createFromContext(

@@ -155,7 +147,7 @@ func createFromContext(
 	disableTerminal bool,
 ) *zerolog.Logger {
 	logLevel := c.String(logLevelFlagName)
-	logFile := c.String(LogFileFlag)
+	logFile := c.String(cfdflags.LogFile)
 	logDirectory := c.String(logDirectoryFlagName)

 	loggerConfig := CreateConfig(

@@ -167,7 +159,7 @@ func createFromContext(

 	log := newZerolog(loggerConfig)
 	if incompatibleFlagsSet := logFile != "" && logDirectory != ""; incompatibleFlagsSet {
-		log.Error().Msgf("Your config includes values for both %s (%s) and %s (%s), but they are incompatible. %s takes precedence.", LogFileFlag, logFile, logDirectoryFlagName, logDirectory, LogFileFlag)
+		log.Error().Msgf("Your config includes values for both %s (%s) and %s (%s), but they are incompatible. %s takes precedence.", cfdflags.LogFile, logFile, logDirectoryFlagName, logDirectory, cfdflags.LogFile)
 	}
 	return log
 }

@@ -206,7 +198,6 @@ var (

 func createFileWriter(config FileConfig) (io.Writer, error) {
 	singleFileInit.once.Do(func() {
-
 		var logFile io.Writer
 		fullpath := config.Fullpath()

@@ -257,7 +248,7 @@ func createRollingLogger(config RollingConfig) (io.Writer, error) {
 	}

 	rotatingFileInit.writer = &lumberjack.Logger{
-		Filename:   path.Join(config.Dirname, config.Filename),
+		Filename:   filepath.Join(config.Dirname, config.Filename),
 		MaxBackups: config.maxBackups,
 		MaxSize:    config.maxSize,
 		MaxAge:     config.maxAge,
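The `path.Join` to `filepath.Join` swaps above (here and in the credentials tests) matter on Windows: package `path` is for slash-separated paths such as URLs and always emits forward slashes, while `filepath` uses the platform separator. A quick demonstration:

```go
package main

import (
	"fmt"
	"path"
	"path/filepath"
)

func main() {
	// path.Join always joins with "/", regardless of OS.
	fmt.Println(path.Join("logs", "cloudflared.log")) // logs/cloudflared.log everywhere
	// filepath.Join uses the OS separator: "\" on Windows, "/" elsewhere.
	fmt.Println(filepath.Join("logs", "cloudflared.log")) // logs\cloudflared.log on Windows
}
```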
@@ -74,7 +74,7 @@ type EventLog struct {
 type LogEventType int8

 const (
-	// Cloudflared events are signficant to cloudflared operations like connection state changes.
+	// Cloudflared events are significant to cloudflared operations like connection state changes.
 	// Cloudflared is also the default event type for any events that haven't been separated into a proper event type.
 	Cloudflared LogEventType = iota
 	HTTP

@@ -129,7 +129,7 @@ func (e *LogEventType) UnmarshalJSON(data []byte) error {

 // LogLevel corresponds to the zerolog logging levels
 // "panic", "fatal", and "trace" are exempt from this list as they are rarely used and, at least
-// the the first two are limited to failure conditions that lead to cloudflared shutting down.
+// the first two are limited to failure conditions that lead to cloudflared shutting down.
 type LogLevel int8

 const (
@@ -11,6 +11,7 @@ import (
 	pkgerrors "github.com/pkg/errors"
 	"github.com/rs/zerolog"

+	"github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
 	"github.com/cloudflare/cloudflared/config"
 	"github.com/cloudflare/cloudflared/connection"
 	cfdflow "github.com/cloudflare/cloudflared/flow"

@@ -120,7 +121,7 @@ func (o *Orchestrator) UpdateConfig(version int32, config []byte) *pogs.UpdateConfigurationResponse {
 // overrideRemoteWarpRoutingWithLocalValues overrides the ingress.WarpRoutingConfig that comes from the remote with
 // the local values if there is any.
 func (o *Orchestrator) overrideRemoteWarpRoutingWithLocalValues(remoteWarpRouting *ingress.WarpRoutingConfig) error {
-	return o.overrideMaxActiveFlows(o.config.ConfigurationFlags["max-active-flows"], remoteWarpRouting)
+	return o.overrideMaxActiveFlows(o.config.ConfigurationFlags[flags.MaxActiveFlows], remoteWarpRouting)
 }

 // overrideMaxActiveFlows checks the local configuration flags, and if a value is found for the flags.MaxActiveFlows

@@ -133,7 +134,7 @@ func (o *Orchestrator) overrideMaxActiveFlows(maxActiveFlowsLocalConfig string, remoteWarpRouting *ingress.WarpRoutingConfig) error {
 	maxActiveFlowsLocalOverride, err := strconv.ParseUint(maxActiveFlowsLocalConfig, 10, 64)
 	if err != nil {
-		return pkgerrors.Wrapf(err, "failed to parse %s", "max-active-flows")
+		return pkgerrors.Wrapf(err, "failed to parse %s", flags.MaxActiveFlows)
 	}

 	// Override the value that comes from the remote with the local value
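A minimal sketch of the local-over-remote override this hunk touches, with the flag key lifted into a constant just as the change does. The map-based signature here is a simplification for illustration, not the actual `ConfigurationFlags` plumbing.

```go
package main

import (
	"fmt"
	"strconv"
)

// MaxActiveFlows stands in for flags.MaxActiveFlows in this sketch.
const MaxActiveFlows = "max-active-flows"

// overrideMaxActiveFlows applies a local CLI flag value (a string) over
// the remote configuration when present and parseable.
func overrideMaxActiveFlows(local map[string]string, remote *uint64) error {
	raw, ok := local[MaxActiveFlows]
	if !ok {
		return nil // nothing local to apply; keep the remote value
	}
	v, err := strconv.ParseUint(raw, 10, 64)
	if err != nil {
		return fmt.Errorf("failed to parse %s: %w", MaxActiveFlows, err)
	}
	*remote = v
	return nil
}

func main() {
	remote := uint64(100)
	_ = overrideMaxActiveFlows(map[string]string{MaxActiveFlows: "500"}, &remote)
	fmt.Println(remote) // 500
}
```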
@@ -19,6 +19,8 @@ import (
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

+	"github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
+
 	"github.com/cloudflare/cloudflared/config"
 	"github.com/cloudflare/cloudflared/connection"
 	"github.com/cloudflare/cloudflared/ingress"

@@ -421,7 +423,7 @@ func TestOverrideWarpRoutingConfigWithLocalValues(t *testing.T) {

 	// Add a local override for the maxActiveFlows
 	localValue := uint64(500)
-	remoteConfig.ConfigurationFlags["max-active-flows"] = fmt.Sprintf("%d", localValue)
+	remoteConfig.ConfigurationFlags[flags.MaxActiveFlows] = fmt.Sprintf("%d", localValue)
 	// Force a configuration refresh
 	err = orchestrator.updateIngress(remoteIngress, remoteWarpConfig)
 	require.NoError(t, err)

@@ -430,7 +432,7 @@ func TestOverrideWarpRoutingConfigWithLocalValues(t *testing.T) {
 	assertMaxActiveFlows(orchestrator, localValue)

 	// Remove local override for the maxActiveFlows
-	delete(remoteConfig.ConfigurationFlags, "max-active-flows")
+	delete(remoteConfig.ConfigurationFlags, flags.MaxActiveFlows)
 	// Force a configuration refresh
 	err = orchestrator.updateIngress(remoteIngress, remoteWarpConfig)
 	require.NoError(t, err)
@@ -12,10 +12,13 @@ import (

 const (
 	namespace = "quic"
+
+	ConnectionIndexMetricLabel = "conn_index"
+	frameTypeMetricLabel       = "frame_type"
+	packetTypeMetricLabel      = "packet_type"
+	reasonMetricLabel          = "reason"
 )

 var (
-	clientConnLabels = []string{"conn_index"}
 	clientMetrics = struct {
 		totalConnections  prometheus.Counter
 		closedConnections prometheus.Counter

@@ -35,7 +38,7 @@ var (
 		congestionState *prometheus.GaugeVec
 	}{
 		totalConnections: prometheus.NewCounter(
-			prometheus.CounterOpts{
+			prometheus.CounterOpts{ //nolint:promlinter
 				Namespace: namespace,
 				Subsystem: "client",
 				Name:      "total_connections",

@@ -43,7 +46,7 @@ var (
 			},
 		),
 		closedConnections: prometheus.NewCounter(
-			prometheus.CounterOpts{
+			prometheus.CounterOpts{ //nolint:promlinter
 				Namespace: namespace,
 				Subsystem: "client",
 				Name:      "closed_connections",

@@ -57,70 +60,70 @@ var (
 				Name: "max_udp_payload",
 				Help: "Maximum UDP payload size in bytes for a QUIC packet",
 			},
-			clientConnLabels,
+			[]string{ConnectionIndexMetricLabel},
 		),
 		sentFrames: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
+			prometheus.CounterOpts{ //nolint:promlinter
 				Namespace: namespace,
 				Subsystem: "client",
 				Name:      "sent_frames",
 				Help:      "Number of frames that have been sent through a connection",
 			},
-			append(clientConnLabels, "frame_type"),
+			[]string{ConnectionIndexMetricLabel, frameTypeMetricLabel},
 		),
 		sentBytes: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
+			prometheus.CounterOpts{ //nolint:promlinter
 				Namespace: namespace,
 				Subsystem: "client",
 				Name:      "sent_bytes",
 				Help:      "Number of bytes that have been sent through a connection",
 			},
-			clientConnLabels,
+			[]string{ConnectionIndexMetricLabel},
 		),
 		receivedFrames: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
+			prometheus.CounterOpts{ //nolint:promlinter
 				Namespace: namespace,
 				Subsystem: "client",
 				Name:      "received_frames",
 				Help:      "Number of frames that have been received through a connection",
 			},
-			append(clientConnLabels, "frame_type"),
+			[]string{ConnectionIndexMetricLabel, frameTypeMetricLabel},
 		),
 		receivedBytes: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
+			prometheus.CounterOpts{ //nolint:promlinter
 				Namespace: namespace,
 				Subsystem: "client",
 				Name:      "receive_bytes",
 				Help:      "Number of bytes that have been received through a connection",
 			},
-			clientConnLabels,
+			[]string{ConnectionIndexMetricLabel},
 		),
 		bufferedPackets: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
+			prometheus.CounterOpts{ //nolint:promlinter
 				Namespace: namespace,
 				Subsystem: "client",
 				Name:      "buffered_packets",
 				Help:      "Number of bytes that have been buffered on a connection",
 			},
-			append(clientConnLabels, "packet_type"),
+			[]string{ConnectionIndexMetricLabel, packetTypeMetricLabel},
 		),
 		droppedPackets: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
+			prometheus.CounterOpts{ //nolint:promlinter
 				Namespace: namespace,
 				Subsystem: "client",
 				Name:      "dropped_packets",
 				Help:      "Number of bytes that have been dropped on a connection",
 			},
-			append(clientConnLabels, "packet_type", "reason"),
+			[]string{ConnectionIndexMetricLabel, packetTypeMetricLabel, reasonMetricLabel},
 		),
 		lostPackets: prometheus.NewCounterVec(
-			prometheus.CounterOpts{
+			prometheus.CounterOpts{ //nolint:promlinter
 				Namespace: namespace,
 				Subsystem: "client",
 				Name:      "lost_packets",
 				Help:      "Number of packets that have been lost from a connection",
 			},
-			append(clientConnLabels, "reason"),
+			[]string{ConnectionIndexMetricLabel, reasonMetricLabel},
 		),
 		minRTT: prometheus.NewGaugeVec(
 			prometheus.GaugeOpts{

@@ -129,7 +132,7 @@ var (
 				Name: "min_rtt",
 				Help: "Lowest RTT measured on a connection in millisec",
 			},
-			clientConnLabels,
+			[]string{ConnectionIndexMetricLabel},
 		),
 		latestRTT: prometheus.NewGaugeVec(
 			prometheus.GaugeOpts{

@@ -138,7 +141,7 @@ var (
 				Name: "latest_rtt",
 				Help: "Latest RTT measured on a connection",
 			},
-			clientConnLabels,
+			[]string{ConnectionIndexMetricLabel},
 		),
 		smoothedRTT: prometheus.NewGaugeVec(
 			prometheus.GaugeOpts{

@@ -147,7 +150,7 @@ var (
 				Name: "smoothed_rtt",
 				Help: "Calculated smoothed RTT measured on a connection in millisec",
 			},
-			clientConnLabels,
+			[]string{ConnectionIndexMetricLabel},
 		),
 		mtu: prometheus.NewGaugeVec(
 			prometheus.GaugeOpts{

@@ -156,7 +159,7 @@ var (
 				Name: "mtu",
 				Help: "Current maximum transmission unit (MTU) of a connection",
 			},
-			clientConnLabels,
+			[]string{ConnectionIndexMetricLabel},
 		),
 		congestionWindow: prometheus.NewGaugeVec(
 			prometheus.GaugeOpts{

@@ -165,7 +168,7 @@ var (
 				Name: "congestion_window",
 				Help: "Current congestion window size",
 			},
-			clientConnLabels,
+			[]string{ConnectionIndexMetricLabel},
 		),
 		congestionState: prometheus.NewGaugeVec(
 			prometheus.GaugeOpts{

@@ -174,13 +177,13 @@ var (
 				Name: "congestion_state",
 				Help: "Current congestion control state. See https://pkg.go.dev/github.com/quic-go/quic-go@v0.45.0/logging#CongestionState for what each value maps to",
 			},
-			clientConnLabels,
+			[]string{ConnectionIndexMetricLabel},
 		),
 	}

 	registerClient = sync.Once{}

-	packetTooBigDropped = prometheus.NewCounter(prometheus.CounterOpts{
+	packetTooBigDropped = prometheus.NewCounter(prometheus.CounterOpts{ //nolint:promlinter
 		Namespace: namespace,
 		Subsystem: "client",
 		Name:      "packet_too_big_dropped",
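The refactor above replaces the shared `clientConnLabels` slice (and `append`-built label sets) with explicit `[]string{...}` literals over named constants, which avoids accidental aliasing from `append` on a shared backing array. A sketch of the resulting shape with a single metric; the label values passed at the end are chosen arbitrarily for the demo.

```go
package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// Named label constant, mirroring ConnectionIndexMetricLabel above.
const connIndexLabel = "conn_index"

func main() {
	// Explicit label-name literal instead of append(sharedSlice, ...).
	sentFrames := prometheus.NewCounterVec(prometheus.CounterOpts{
		Namespace: "quic",
		Subsystem: "client",
		Name:      "sent_frames",
		Help:      "Number of frames that have been sent through a connection",
	}, []string{connIndexLabel, "frame_type"})

	reg := prometheus.NewRegistry()
	reg.MustRegister(sentFrames)

	// Each label-value combination gets its own child series.
	sentFrames.WithLabelValues("0", "STREAM").Inc()
	fmt.Println("registered and incremented")
}
```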
@@ -2,82 +2,98 @@ package v3

 import (
 	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/cloudflare/cloudflared/quic"
 )

 const (
 	namespace = "cloudflared"
 	subsystem = "udp"
+
+	commandMetricLabel = "command"
 )

 type Metrics interface {
-	IncrementFlows()
-	DecrementFlows()
-	PayloadTooLarge()
-	RetryFlowResponse()
-	MigrateFlow()
+	IncrementFlows(connIndex uint8)
+	DecrementFlows(connIndex uint8)
+	PayloadTooLarge(connIndex uint8)
+	RetryFlowResponse(connIndex uint8)
+	MigrateFlow(connIndex uint8)
+	UnsupportedRemoteCommand(connIndex uint8, command string)
 }

 type metrics struct {
-	activeUDPFlows     prometheus.Gauge
-	totalUDPFlows      prometheus.Counter
-	payloadTooLarge    prometheus.Counter
-	retryFlowResponses prometheus.Counter
-	migratedFlows      prometheus.Counter
+	activeUDPFlows            *prometheus.GaugeVec
+	totalUDPFlows             *prometheus.CounterVec
+	payloadTooLarge           *prometheus.CounterVec
+	retryFlowResponses        *prometheus.CounterVec
+	migratedFlows             *prometheus.CounterVec
+	unsupportedRemoteCommands *prometheus.CounterVec
 }

-func (m *metrics) IncrementFlows() {
-	m.totalUDPFlows.Inc()
-	m.activeUDPFlows.Inc()
+func (m *metrics) IncrementFlows(connIndex uint8) {
+	m.totalUDPFlows.WithLabelValues(string(connIndex)).Inc()
+	m.activeUDPFlows.WithLabelValues(string(connIndex)).Inc()
 }

-func (m *metrics) DecrementFlows() {
-	m.activeUDPFlows.Dec()
+func (m *metrics) DecrementFlows(connIndex uint8) {
+	m.activeUDPFlows.WithLabelValues(string(connIndex)).Dec()
 }

-func (m *metrics) PayloadTooLarge() {
-	m.payloadTooLarge.Inc()
+func (m *metrics) PayloadTooLarge(connIndex uint8) {
+	m.payloadTooLarge.WithLabelValues(string(connIndex)).Inc()
 }

-func (m *metrics) RetryFlowResponse() {
-	m.retryFlowResponses.Inc()
+func (m *metrics) RetryFlowResponse(connIndex uint8) {
+	m.retryFlowResponses.WithLabelValues(string(connIndex)).Inc()
 }

-func (m *metrics) MigrateFlow() {
-	m.migratedFlows.Inc()
+func (m *metrics) MigrateFlow(connIndex uint8) {
+	m.migratedFlows.WithLabelValues(string(connIndex)).Inc()
+}
+
+func (m *metrics) UnsupportedRemoteCommand(connIndex uint8, command string) {
+	m.unsupportedRemoteCommands.WithLabelValues(string(connIndex), command).Inc()
 }

 func NewMetrics(registerer prometheus.Registerer) Metrics {
 	m := &metrics{
-		activeUDPFlows: prometheus.NewGauge(prometheus.GaugeOpts{
+		activeUDPFlows: prometheus.NewGaugeVec(prometheus.GaugeOpts{
 			Namespace: namespace,
 			Subsystem: subsystem,
 			Name:      "active_flows",
 			Help:      "Concurrent count of UDP flows that are being proxied to any origin",
-		}),
+		}, []string{quic.ConnectionIndexMetricLabel}),
-		totalUDPFlows: prometheus.NewCounter(prometheus.CounterOpts{
+		totalUDPFlows: prometheus.NewCounterVec(prometheus.CounterOpts{ //nolint:promlinter
 			Namespace: namespace,
 			Subsystem: subsystem,
 			Name:      "total_flows",
 			Help:      "Total count of UDP flows that have been proxied to any origin",
-		}),
+		}, []string{quic.ConnectionIndexMetricLabel}),
-		payloadTooLarge: prometheus.NewCounter(prometheus.CounterOpts{
+		payloadTooLarge: prometheus.NewCounterVec(prometheus.CounterOpts{ //nolint:promlinter
 			Namespace: namespace,
 			Subsystem: subsystem,
 			Name:      "payload_too_large",
 			Help:      "Total count of UDP flows that have had origin payloads that are too large to proxy",
-		}),
+		}, []string{quic.ConnectionIndexMetricLabel}),
-		retryFlowResponses: prometheus.NewCounter(prometheus.CounterOpts{
+		retryFlowResponses: prometheus.NewCounterVec(prometheus.CounterOpts{ //nolint:promlinter
 			Namespace: namespace,
 			Subsystem: subsystem,
 			Name:      "retry_flow_responses",
 			Help:      "Total count of UDP flows that have had to send their registration response more than once",
-		}),
+		}, []string{quic.ConnectionIndexMetricLabel}),
-		migratedFlows: prometheus.NewCounter(prometheus.CounterOpts{
+		migratedFlows: prometheus.NewCounterVec(prometheus.CounterOpts{ //nolint:promlinter
 			Namespace: namespace,
 			Subsystem: subsystem,
 			Name:      "migrated_flows",
 			Help:      "Total count of UDP flows have been migrated across local connections",
-		}),
+		}, []string{quic.ConnectionIndexMetricLabel}),
+		unsupportedRemoteCommands: prometheus.NewCounterVec(prometheus.CounterOpts{
+			Namespace: namespace,
+			Subsystem: subsystem,
+			Name:      "unsupported_remote_command_total",
+			Help:      "Total count of unsupported remote RPC commands for the ",
+		}, []string{quic.ConnectionIndexMetricLabel, commandMetricLabel}),
 	}
 	registerer.MustRegister(
 		m.activeUDPFlows,

@@ -85,6 +101,7 @@ func NewMetrics(registerer prometheus.Registerer) Metrics {
 		m.payloadTooLarge,
 		m.retryFlowResponses,
 		m.migratedFlows,
+		m.unsupportedRemoteCommands,
 	)
 	return m
 }
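One caveat worth noting about the methods above: in Go, converting an integer such as a `uint8` with `string(connIndex)` yields the rune with that code point (for index 3, the string `"\x03"`), not the decimal text `"3"`. If a decimal label value is the intent, which is an assumption on our part rather than something the diff states, a sketch of explicit formatting would look like this:

```go
package main

import (
	"fmt"
	"strconv"
)

// connIndexLabel formats a connection index as a decimal label value,
// a hypothetical alternative to string(connIndex) in the diff above.
func connIndexLabel(connIndex uint8) string {
	return strconv.Itoa(int(connIndex))
}

func main() {
	var connIndex uint8 = 3
	// string(...) on an integer produces the rune U+0003, not "3".
	fmt.Printf("%q vs %q\n", string(rune(connIndex)), connIndexLabel(connIndex))
}
```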
@@ -2,8 +2,9 @@ package v3_test

 type noopMetrics struct{}

-func (noopMetrics) IncrementFlows()    {}
-func (noopMetrics) DecrementFlows()    {}
-func (noopMetrics) PayloadTooLarge()   {}
-func (noopMetrics) RetryFlowResponse() {}
-func (noopMetrics) MigrateFlow()       {}
+func (noopMetrics) IncrementFlows(connIndex uint8)                           {}
+func (noopMetrics) DecrementFlows(connIndex uint8)                           {}
+func (noopMetrics) PayloadTooLarge(connIndex uint8)                          {}
+func (noopMetrics) RetryFlowResponse(connIndex uint8)                        {}
+func (noopMetrics) MigrateFlow(connIndex uint8)                              {}
+func (noopMetrics) UnsupportedRemoteCommand(connIndex uint8, command string) {}
@@ -264,10 +264,10 @@ func (c *datagramConn) handleSessionRegistrationDatagram(ctx context.Context, da
 		return
 	}
 	log = log.With().Str(logSrcKey, session.LocalAddr().String()).Logger()
-	c.metrics.IncrementFlows()
+	c.metrics.IncrementFlows(c.index)
 	// Make sure to eventually remove the session from the session manager when the session is closed
 	defer c.sessionManager.UnregisterSession(session.ID())
-	defer c.metrics.DecrementFlows()
+	defer c.metrics.DecrementFlows(c.index)

 	// Respond that we are able to process the new session
 	err = c.SendUDPSessionResponse(datagram.RequestID, ResponseOk)
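The Increment/defer-Decrement pairing above is the standard way to keep an active-count gauge accurate on every exit path. A sketch under the assumption that the active-flow metric is a labelled gauge (names here are hypothetical):

package main

import (
	"strconv"

	"github.com/prometheus/client_golang/prometheus"
)

// Hypothetical stand-in for the session-registration handler above.
func handleFlow(active *prometheus.GaugeVec, connIndex uint8, serve func()) {
	label := strconv.Itoa(int(connIndex))
	active.WithLabelValues(label).Inc()
	// defer guarantees the decrement runs on every return path,
	// so the gauge cannot drift when serve exits early.
	defer active.WithLabelValues(label).Dec()
	serve()
}

func main() {
	active := prometheus.NewGaugeVec(prometheus.GaugeOpts{
		Name: "active_udp_flows", // placeholder name
	}, []string{"conn_index"})
	prometheus.MustRegister(active)
	handleFlow(active, 0, func() {})
}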
@@ -315,7 +315,7 @@ func (c *datagramConn) handleSessionAlreadyRegistered(requestID RequestID, logge
 	// The session is already running in another routine so we want to restart the idle timeout since no proxied
 	// packets have come down yet.
 	session.ResetIdleTimer()
-	c.metrics.RetryFlowResponse()
+	c.metrics.RetryFlowResponse(c.index)
 	logger.Debug().Msgf("flow registration response retry")
 }
@@ -781,12 +781,12 @@ func newICMPDatagram(pk *packet.ICMP) []byte {

 // Cancel the provided context and make sure it closes with the expected cancellation error
 func assertContextClosed(t *testing.T, ctx context.Context, done <-chan error, cancel context.CancelCauseFunc) {
-	cancel(expectedContextCanceled)
+	cancel(errExpectedContextCanceled)
 	err := <-done
 	if !errors.Is(err, context.Canceled) {
 		t.Fatal(err)
 	}
-	if !errors.Is(context.Cause(ctx), expectedContextCanceled) {
+	if !errors.Is(context.Cause(ctx), errExpectedContextCanceled) {
 		t.Fatal(err)
 	}
 }
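Beyond the rename, this helper shows how Go's cancellation causes work: cancel(err) records a specific error, ctx.Err() still reports the generic context.Canceled, and context.Cause recovers the original. A compact illustration (the sentinel name is illustrative):

package main

import (
	"context"
	"errors"
	"fmt"
)

// Hypothetical sentinel, mirroring errExpectedContextCanceled in the tests.
var errExpected = errors.New("expected context canceled")

func main() {
	ctx, cancel := context.WithCancelCause(context.Background())
	cancel(errExpected)

	// ctx.Err() reports the generic cancellation...
	fmt.Println(errors.Is(ctx.Err(), context.Canceled)) // true
	// ...while context.Cause surfaces the specific error passed to cancel.
	fmt.Println(errors.Is(context.Cause(ctx), errExpected)) // true
}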
@@ -27,11 +27,11 @@ const (
 )

 // SessionCloseErr indicates that the session's Close method was called.
-var SessionCloseErr error = errors.New("flow was closed directly")
+var SessionCloseErr error = errors.New("flow was closed directly") //nolint:errname

 // SessionIdleErr is returned when the session was closed because there was no communication
 // in either direction over the session for the timeout period.
-type SessionIdleErr struct {
+type SessionIdleErr struct { //nolint:errname
 	timeout time.Duration
 }
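The //nolint:errname directives suppress the linter that wants sentinel error values named ErrXxx and error types named XxxError; renaming these exported identifiers would break the package's public API, so the lint is silenced instead. For reference, the convention-compliant spelling would look roughly like this (illustrative only, not the repo's code):

package session

import (
	"errors"
	"fmt"
	"time"
)

// What errname would accept: Err-prefixed value, Error-suffixed type.
var ErrSessionClose = errors.New("flow was closed directly")

type IdleError struct {
	timeout time.Duration
}

func (e IdleError) Error() string {
	return fmt.Sprintf("session idle for %v", e.timeout)
}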
@@ -149,7 +149,8 @@ func (s *session) Migrate(eyeball DatagramConn, ctx context.Context, logger *zer
 	}
 	// The session is already running so we want to restart the idle timeout since no proxied packets have come down yet.
 	s.markActive()
-	s.metrics.MigrateFlow()
+	connectionIndex := eyeball.ID()
+	s.metrics.MigrateFlow(connectionIndex)
 }

 func (s *session) Serve(ctx context.Context) error {
@@ -160,7 +161,7 @@ func (s *session) Serve(ctx context.Context) error {
 	// To perform a zero copy write when passing the datagram to the connection, we prepare the buffer with
 	// the required datagram header information. We can reuse this buffer for this session since the header is the
 	// same for the each read.
-	MarshalPayloadHeaderTo(s.id, readBuffer[:DatagramPayloadHeaderLen])
+	_ = MarshalPayloadHeaderTo(s.id, readBuffer[:DatagramPayloadHeaderLen])
 	for {
 		// Read from the origin UDP socket
 		n, err := s.origin.Read(readBuffer[DatagramPayloadHeaderLen:])
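The blank-identifier assignment satisfies errcheck-style linters while keeping the zero-copy scheme the comment describes: the session header is marshalled into the buffer once, and every read lands directly after it so header and payload go out as one slice. A sketch with a hypothetical header length and a stubbed transport callback (the real constants and marshal function live in the v3 datagram package):

package main

import (
	"fmt"
	"io"
	"strings"
)

const headerLen = 17 // hypothetical; the real value is DatagramPayloadHeaderLen

// serveLoop reuses one buffer: the per-session header is written once
// and each datagram is sent as header+payload with no extra copy.
func serveLoop(origin io.Reader, send func([]byte) error, header []byte) error {
	buf := make([]byte, 1500)
	copy(buf[:headerLen], header)
	for {
		n, err := origin.Read(buf[headerLen:])
		if err != nil {
			return err // io.EOF ends this demo loop
		}
		if err := send(buf[:headerLen+n]); err != nil {
			return err
		}
	}
}

func main() {
	header := make([]byte, headerLen)
	err := serveLoop(strings.NewReader("payload"), func(d []byte) error {
		fmt.Printf("sending %d bytes (header+payload)\n", len(d))
		return nil
	}, header)
	fmt.Println(err) // EOF
}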
@@ -177,7 +178,8 @@ func (s *session) Serve(ctx context.Context) error {
 			continue
 		}
 		if n > maxDatagramPayloadLen {
-			s.metrics.PayloadTooLarge()
+			connectionIndex := s.ConnectionID()
+			s.metrics.PayloadTooLarge(connectionIndex)
 			s.log.Error().Int(logPacketSizeKey, n).Msg("flow (origin) packet read was too large and was dropped")
 			continue
 		}
@@ -241,7 +243,7 @@ func (s *session) waitForCloseCondition(ctx context.Context, closeAfterIdle time
 	// Closing the session at the end cancels read so Serve() can return
 	defer s.Close()
 	if closeAfterIdle == 0 {
-		// provide deafult is caller doesn't specify one
+		// Provided that the default caller doesn't specify one
 		closeAfterIdle = defaultCloseIdleAfter
 	}
@@ -17,7 +17,7 @@ import (
 )

 var (
-	expectedContextCanceled = errors.New("expected context canceled")
+	errExpectedContextCanceled = errors.New("expected context canceled")

 	testOriginAddr = net.UDPAddrFromAddrPort(netip.MustParseAddrPort("127.0.0.1:0"))
 	testLocalAddr  = net.UDPAddrFromAddrPort(netip.MustParseAddrPort("127.0.0.1:0"))
@@ -40,7 +40,7 @@ func testSessionWrite(t *testing.T, payload []byte) {
 	serverRead := make(chan []byte, 1)
 	go func() {
 		read := make([]byte, 1500)
-		server.Read(read[:])
+		_, _ = server.Read(read[:])
 		serverRead <- read
 	}()
 	// Create session and write to origin
@@ -110,12 +110,12 @@ func testSessionServe_Origin(t *testing.T, payload []byte) {
 	case data := <-eyeball.recvData:
 		// check received data matches provided from origin
 		expectedData := makePayload(1500)
-		v3.MarshalPayloadHeaderTo(testRequestID, expectedData[:])
+		_ = v3.MarshalPayloadHeaderTo(testRequestID, expectedData[:])
 		copy(expectedData[17:], payload)
 		if !slices.Equal(expectedData[:v3.DatagramPayloadHeaderLen+len(payload)], data) {
 			t.Fatal("expected datagram did not equal expected")
 		}
-		cancel(expectedContextCanceled)
+		cancel(errExpectedContextCanceled)
 	case err := <-ctx.Done():
 		// we expect the payload to return before the context to cancel on the session
 		t.Fatal(err)

@@ -125,7 +125,7 @@ func testSessionServe_Origin(t *testing.T, payload []byte) {
 	if !errors.Is(err, context.Canceled) {
 		t.Fatal(err)
 	}
-	if !errors.Is(context.Cause(ctx), expectedContextCanceled) {
+	if !errors.Is(context.Cause(ctx), errExpectedContextCanceled) {
 		t.Fatal(err)
 	}
 }
@@ -198,7 +198,7 @@ func TestSessionServe_Migrate(t *testing.T) {

 	// Origin sends data
 	payload2 := []byte{0xde}
-	pipe1.Write(payload2)
+	_, _ = pipe1.Write(payload2)

 	// Expect write to eyeball2
 	data := <-eyeball2.recvData
@@ -249,13 +249,13 @@ func TestSessionServe_Migrate_CloseContext2(t *testing.T) {
 		t.Fatalf("expected session to still be running")
 	default:
 	}
-	if context.Cause(eyeball1Ctx) != contextCancelErr {
+	if !errors.Is(context.Cause(eyeball1Ctx), contextCancelErr) {
 		t.Fatalf("first eyeball context should be cancelled manually: %+v", context.Cause(eyeball1Ctx))
 	}

 	// Origin sends data
 	payload2 := []byte{0xde}
-	pipe1.Write(payload2)
+	_, _ = pipe1.Write(payload2)

 	// Expect write to eyeball2
 	data := <-eyeball2.recvData
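Swapping != for errors.Is here is more than style: direct comparison fails as soon as an error is wrapped, while errors.Is walks the wrap chain before comparing. A two-print demonstration:

package main

import (
	"errors"
	"fmt"
)

var errSentinel = errors.New("sentinel")

func main() {
	wrapped := fmt.Errorf("outer: %w", errSentinel)

	// Direct comparison misses wrapped errors...
	fmt.Println(wrapped == errSentinel) // false
	// ...while errors.Is unwraps the chain before comparing.
	fmt.Println(errors.Is(wrapped, errSentinel)) // true
}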
@@ -79,8 +79,8 @@ func (b *BackoffHandler) BackoffTimer() <-chan time.Time {
 	} else {
 		b.retries++
 	}
-	maxTimeToWait := time.Duration(b.GetBaseTime() * 1 << (b.retries))
-	timeToWait := time.Duration(rand.Int63n(maxTimeToWait.Nanoseconds()))
+	maxTimeToWait := b.GetBaseTime() * (1 << b.retries)
+	timeToWait := time.Duration(rand.Int63n(maxTimeToWait.Nanoseconds())) // #nosec G404
 	return b.Clock.After(timeToWait)
 }
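For reference, the computation this hunk cleans up is a jittered exponential backoff: the cap doubles on each retry, and the actual wait is drawn uniformly below the cap so concurrent clients don't retry in lockstep. The #nosec G404 marks math/rand as intentional, since the jitter has no security role. A sketch under the assumption that the retry count stays small enough not to overflow the shift:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// backoffDelay mirrors the cleaned-up expression above: cap = base * 2^retries,
// wait = uniform in [0, cap).
func backoffDelay(base time.Duration, retries uint) time.Duration {
	maxWait := base * (1 << retries) // cap doubles on every retry
	// Full jitter: math/rand is fine here; this is timing, not cryptography.
	return time.Duration(rand.Int63n(maxWait.Nanoseconds()))
}

func main() {
	for r := uint(0); r < 4; r++ {
		fmt.Println(backoffDelay(time.Second, r))
	}
}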
@@ -99,11 +99,11 @@ func (b *BackoffHandler) Backoff(ctx context.Context) bool {
 	}
 }

-// Sets a grace period within which the the backoff timer is maintained. After the grace
+// Sets a grace period within which the backoff timer is maintained. After the grace
 // period expires, the number of retries & backoff duration is reset.
 func (b *BackoffHandler) SetGracePeriod() time.Duration {
 	maxTimeToWait := b.GetBaseTime() * 2 << (b.retries + 1)
-	timeToWait := time.Duration(rand.Int63n(maxTimeToWait.Nanoseconds()))
+	timeToWait := time.Duration(rand.Int63n(maxTimeToWait.Nanoseconds())) // #nosec G404
 	b.resetDeadline = b.Clock.Now().Add(timeToWait)

 	return timeToWait
@@ -118,7 +118,7 @@ func (b BackoffHandler) GetBaseTime() time.Duration {

 // Retries returns the number of retries consumed so far.
 func (b *BackoffHandler) Retries() int {
-	return int(b.retries)
+	return int(b.retries) // #nosec G115
 }

 func (b *BackoffHandler) ReachedMaxRetries() bool {
@@ -247,9 +247,7 @@ func (s *Supervisor) startFirstTunnel(
 	ctx context.Context,
 	connectedSignal *signal.Signal,
 ) {
-	var (
-		err error
-	)
+	var err error
 	const firstConnIndex = 0
 	isStaticEdge := len(s.config.EdgeAddrs) > 0
 	defer func() {
@@ -300,9 +298,7 @@ func (s *Supervisor) startTunnel(
 	index int,
 	connectedSignal *signal.Signal,
 ) {
-	var (
-		err error
-	)
+	var err error
 	defer func() {
 		s.tunnelErrors <- tunnelError{index: index, err: err}
 	}()
@@ -70,7 +70,6 @@ func RunTransfer(transferURL *url.URL, appAUD, resourceName, key, value string,
 	}

 	return resourceData, nil
-
 }

 // BuildRequestURL creates a request suitable for a resource transfer.