Revert "TUN-7065: Remove classic tunnel creation"

This reverts commit c24f275981.
João Oliveirinha 2023-02-01 14:01:59 +00:00
parent 90d710e3ec
commit 62dcb8a1d1
9 changed files with 256 additions and 52 deletions
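For orientation, here is a minimal, self-contained sketch of the command dispatch this revert restores: when --hostname is set, cloudflared falls back to starting a classic tunnel instead of returning the deprecation error removed below. This is an illustrative reconstruction, not the cloudflared source; the flag wiring and the quick-tunnel placeholder are assumptions.

```go
// Sketch only: mirrors the restored TunnelCommand/runClassicTunnel flow from
// the hunks below, with made-up placeholders for everything else.
package main

import (
	"fmt"

	"github.com/urfave/cli/v2"
)

func tunnelCommand(c *cli.Context) error {
	// Quick tunnels are only attempted when no hostname is given
	// (see the restored condition in cmd.go below).
	if c.String("hostname") == "" && c.IsSet("url") {
		return fmt.Errorf("quick tunnel path (placeholder)")
	}
	// Restored behavior: a hostname implies a classic (non-named) tunnel.
	if c.String("hostname") != "" {
		return runClassicTunnel(c)
	}
	return nil
}

func runClassicTunnel(c *cli.Context) error {
	// In cloudflared this calls StartServer(sc.c, buildInfo, nil, sc.log).
	fmt.Println("starting classic tunnel for", c.String("hostname"))
	return nil
}

func main() {
	app := &cli.App{
		Name: "sketch",
		Flags: []cli.Flag{
			&cli.StringFlag{Name: "hostname"},
			&cli.StringFlag{Name: "url"},
		},
		Action: tunnelCommand,
	}
	_ = app.Run([]string{"sketch", "--hostname", "app.example.com"})
}
```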

View File

@@ -155,6 +155,8 @@ func action(graceShutdownC chan struct{}) cli.ActionFunc {
if isEmptyInvocation(c) {
return handleServiceMode(c, graceShutdownC)
}
tags := make(map[string]string)
tags["hostname"] = c.String("hostname")
func() {
defer sentry.Recover()
err = tunnel.TunnelCommand(c)

View File

@@ -100,7 +100,6 @@ var (
routeFailMsg = fmt.Sprintf("failed to provision routing, please create it manually via Cloudflare dashboard or UI; "+
"most likely you already have a conflicting record there. You can also rerun this command with --%s to overwrite "+
"any existing DNS records for this hostname.", overwriteDNSFlag)
deprecatedClassicTunnelErr = fmt.Errorf("Classic tunnels have been deprecated, please use Named Tunnels. (https://developers.cloudflare.com/cloudflare-one/connections/connect-apps/install-and-setup/tunnel-guide/)")
)
func Flags() []cli.Flag {
@@ -183,7 +182,7 @@ func TunnelCommand(c *cli.Context) error {
// Unauthenticated named tunnel on <random>.<quick-tunnels-service>.com
shouldRunQuickTunnel := c.IsSet("url") || c.IsSet("hello-world")
if !dnsProxyStandAlone(c, nil) && c.String("quick-service") != "" && shouldRunQuickTunnel {
if !dnsProxyStandAlone(c, nil) && c.String("hostname") == "" && c.String("quick-service") != "" && shouldRunQuickTunnel {
return RunQuickTunnel(sc)
}
@@ -191,9 +190,9 @@ func TunnelCommand(c *cli.Context) error {
return fmt.Errorf("Use `cloudflared tunnel run` to start tunnel %s", ref)
}
// classic tunnel usage is no longer supported
// Start a classic tunnel if hostname is specified.
if c.String("hostname") != "" {
return deprecatedClassicTunnelErr
return runClassicTunnel(sc)
}
if c.IsSet("proxy-dns") {
@@ -238,6 +237,11 @@ func runAdhocNamedTunnel(sc *subcommandContext, name, credentialsOutputPath stri
return nil
}
// runClassicTunnel creates a "classic" non-named tunnel
func runClassicTunnel(sc *subcommandContext) error {
return StartServer(sc.c, buildInfo, nil, sc.log)
}
func routeFromFlag(c *cli.Context) (route cfapi.HostnameRoute, ok bool) {
if hostname := c.String("hostname"); hostname != "" {
if lbPool := c.String("lb-pool"); lbPool != "" {
@@ -346,6 +350,14 @@ func StartServer(
return waitToShutdown(&wg, cancel, errC, graceShutdownC, 0, log)
}
url := c.String("url")
hostname := c.String("hostname")
if url == hostname && url != "" && hostname != "" {
errText := "hostname and url shouldn't match. See --help for more information"
log.Error().Msg(errText)
return fmt.Errorf(errText)
}
logTransport := logger.CreateTransportLoggerFromContext(c, logger.EnableTerminalLog)
observer := connection.NewObserver(log, logTransport)

View File

@@ -32,6 +32,7 @@ import (
"github.com/cloudflare/cloudflared/supervisor"
"github.com/cloudflare/cloudflared/tlsconfig"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
"github.com/cloudflare/cloudflared/validation"
)
const LogFieldOriginCertPath = "originCertPath"
@@ -42,6 +43,8 @@ var (
serviceUrl = developerPortal + "/reference/service/"
argumentsUrl = developerPortal + "/reference/arguments/"
LogFieldHostname = "hostname"
secretFlags = [2]*altsrc.StringFlag{credentialsContentsFlag, tunnelTokenFlag}
defaultFeatures = []string{supervisor.FeatureAllowRemoteConfig, supervisor.FeatureSerializedHeaders, supervisor.FeatureDatagramV2, supervisor.FeatureQUICSupportEOF}
@@ -124,7 +127,7 @@ func isSecretEnvVar(key string) bool {
}
func dnsProxyStandAlone(c *cli.Context, namedTunnel *connection.NamedTunnelProperties) bool {
return c.IsSet("proxy-dns") && (!c.IsSet("tag") && !c.IsSet("hello-world") && namedTunnel == nil)
return c.IsSet("proxy-dns") && (!c.IsSet("hostname") && !c.IsSet("tag") && !c.IsSet("hello-world") && namedTunnel == nil)
}
func findOriginCert(originCertPath string, log *zerolog.Logger) (string, error) {
@@ -190,19 +193,37 @@ func prepareTunnelConfig(
observer *connection.Observer,
namedTunnel *connection.NamedTunnelProperties,
) (*supervisor.TunnelConfig, *orchestration.Config, error) {
clientID, err := uuid.NewRandom()
isNamedTunnel := namedTunnel != nil
configHostname := c.String("hostname")
hostname, err := validation.ValidateHostname(configHostname)
if err != nil {
return nil, nil, errors.Wrap(err, "can't generate connector UUID")
log.Err(err).Str(LogFieldHostname, configHostname).Msg("Invalid hostname")
return nil, nil, errors.Wrap(err, "Invalid hostname")
}
log.Info().Msgf("Generated Connector ID: %s", clientID)
clientID := c.String("id")
if !c.IsSet("id") {
clientID, err = generateRandomClientID(log)
if err != nil {
return nil, nil, err
}
}
tags, err := NewTagSliceFromCLI(c.StringSlice("tag"))
if err != nil {
log.Err(err).Msg("Tag parse failure")
return nil, nil, errors.Wrap(err, "Tag parse failure")
}
tags = append(tags, tunnelpogs.Tag{Name: "ID", Value: clientID.String()})
tags = append(tags, tunnelpogs.Tag{Name: "ID", Value: clientID})
var (
ingressRules ingress.Ingress
classicTunnel *connection.ClassicTunnelProperties
)
transportProtocol := c.String("protocol")
needPQ := c.Bool("post-quantum")
if needPQ {
if FipsEnabled {
@@ -217,6 +238,13 @@ func prepareTunnelConfig(
protocolFetcher := edgediscovery.ProtocolPercentage
cfg := config.GetConfiguration()
if isNamedTunnel {
clientUUID, err := uuid.NewRandom()
if err != nil {
return nil, nil, errors.Wrap(err, "can't generate connector UUID")
}
log.Info().Msgf("Generated Connector ID: %s", clientUUID)
features := append(c.StringSlice("features"), defaultFeatures...)
if needPQ {
features = append(features, supervisor.FeaturePostQuantum)
@@ -241,28 +269,48 @@ func prepareTunnelConfig(
log.Info().Msg("Will be fetching remotely managed configuration from Cloudflare API. Defaulting to protocol: quic")
}
namedTunnel.Client = tunnelpogs.ClientInfo{
ClientID: clientID[:],
ClientID: clientUUID[:],
Features: dedup(features),
Version: info.Version(),
Arch: info.OSArch(),
}
cfg := config.GetConfiguration()
ingressRules, err := ingress.ParseIngress(cfg)
ingressRules, err = ingress.ParseIngress(cfg)
if err != nil && err != ingress.ErrNoIngressRules {
return nil, nil, err
}
// Only for quick tunnels will we attempt to parse the --url flag for a tunnel ingress rule
if ingressRules.IsEmpty() && c.IsSet("url") && namedTunnel.QuickTunnelUrl != "" {
ingressRules, err = ingress.NewSingleOrigin(c, true)
if !ingressRules.IsEmpty() && c.IsSet("url") {
return nil, nil, ingress.ErrURLIncompatibleWithIngress
}
} else {
originCertPath := c.String("origincert")
originCertLog := log.With().
Str(LogFieldOriginCertPath, originCertPath).
Logger()
originCert, err := getOriginCert(originCertPath, &originCertLog)
if err != nil {
return nil, nil, errors.Wrap(err, "Error getting origin cert")
}
classicTunnel = &connection.ClassicTunnelProperties{
Hostname: hostname,
OriginCert: originCert,
// turn off use of reconnect token and auth refresh when using named tunnels
UseReconnectToken: !isNamedTunnel && c.Bool("use-reconnect-token"),
}
}
// Convert single-origin configuration into multi-origin configuration.
if ingressRules.IsEmpty() {
ingressRules, err = ingress.NewSingleOrigin(c, !isNamedTunnel)
if err != nil {
return nil, nil, err
}
}
if ingressRules.IsEmpty() {
return nil, nil, ingress.ErrNoIngressRules
}
protocolSelector, err := connection.NewProtocolSelector(transportProtocol, cfg.WarpRouting.Enabled, namedTunnel, protocolFetcher, supervisor.ResolveTTL, log, c.Bool("post-quantum"))
warpRoutingEnabled := isWarpRoutingEnabled(cfg.WarpRouting, isNamedTunnel)
protocolSelector, err := connection.NewProtocolSelector(transportProtocol, warpRoutingEnabled, namedTunnel, protocolFetcher, supervisor.ResolveTTL, log, c.Bool("post-quantum"))
if err != nil {
return nil, nil, err
}
@@ -314,7 +362,7 @@ func prepareTunnelConfig(
GracePeriod: gracePeriod,
ReplaceExisting: c.Bool("force"),
OSArch: info.OSArch(),
ClientID: clientID.String(),
ClientID: clientID,
EdgeAddrs: c.StringSlice("edge"),
Region: c.String("region"),
EdgeIPVersion: edgeIPVersion,
@@ -331,6 +379,7 @@ func prepareTunnelConfig(
Retries: uint(c.Int("retries")),
RunFromTerminal: isRunningFromTerminal(),
NamedTunnel: namedTunnel,
ClassicTunnel: classicTunnel,
MuxerConfig: muxerConfig,
ProtocolSelector: protocolSelector,
EdgeTLSConfigs: edgeTLSConfigs,
@@ -372,6 +421,10 @@ func gracePeriod(c *cli.Context) (time.Duration, error) {
return period, nil
}
func isWarpRoutingEnabled(warpConfig config.WarpRoutingConfig, isNamedTunnel bool) bool {
return warpConfig.Enabled && isNamedTunnel
}
func isRunningFromTerminal() bool {
return terminal.IsTerminal(int(os.Stdout.Fd()))
}

View File

@@ -12,12 +12,18 @@ class TestProxyDns:
def test_proxy_dns_with_named_tunnel(self, tmp_path, component_tests_config):
run_test_scenario(tmp_path, component_tests_config, CfdModes.NAMED, run_proxy_dns=True)
def test_proxy_dns_with_classic_tunnel(self, tmp_path, component_tests_config):
run_test_scenario(tmp_path, component_tests_config, CfdModes.CLASSIC, run_proxy_dns=True)
def test_proxy_dns_alone(self, tmp_path, component_tests_config):
run_test_scenario(tmp_path, component_tests_config, CfdModes.PROXY_DNS, run_proxy_dns=True)
def test_named_tunnel_alone(self, tmp_path, component_tests_config):
run_test_scenario(tmp_path, component_tests_config, CfdModes.NAMED, run_proxy_dns=False)
def test_classic_tunnel_alone(self, tmp_path, component_tests_config):
run_test_scenario(tmp_path, component_tests_config, CfdModes.CLASSIC, run_proxy_dns=False)
def run_test_scenario(tmp_path, component_tests_config, cfd_mode, run_proxy_dns):
expect_proxy_dns = run_proxy_dns
@@ -27,6 +33,10 @@ run_test_scenario(tmp_path, component_tests_config, cfd_mode, run_proxy_dns)
expect_tunnel = True
pre_args = ["tunnel"]
args = ["run"]
elif cfd_mode == CfdModes.CLASSIC:
expect_tunnel = True
pre_args = []
args = []
elif cfd_mode == CfdModes.PROXY_DNS:
expect_proxy_dns = True
pre_args = []

View File

@@ -33,6 +33,13 @@ class TestReconnect:
# Repeat the test multiple times because some issues only occur after multiple reconnects
self.assert_reconnect(config, cloudflared, 5)
def test_classic_reconnect(self, tmp_path, component_tests_config):
extra_config = copy.copy(self.extra_config)
extra_config["hello-world"] = True
config = component_tests_config(additional_config=extra_config, cfd_mode=CfdModes.CLASSIC)
with start_cloudflared(tmp_path, config, cfd_args=[], new_process=True, allow_input=True, capture_output=False) as cloudflared:
self.assert_reconnect(config, cloudflared, 1)
def send_reconnect(self, cloudflared, secs):
# Although it is recommended to use the Popen.communicate method, we cannot
# use it because it blocks on reading stdout and stderr until EOF is reached

View File

@@ -123,6 +123,46 @@ func (h *h2muxConnection) ServeNamedTunnel(ctx context.Context, namedTunnel *Nam
return err
}
func (h *h2muxConnection) ServeClassicTunnel(ctx context.Context, classicTunnel *ClassicTunnelProperties, credentialManager CredentialManager, registrationOptions *tunnelpogs.RegistrationOptions, connectedFuse ConnectedFuse) error {
errGroup, serveCtx := errgroup.WithContext(ctx)
errGroup.Go(func() error {
return h.serveMuxer(serveCtx)
})
errGroup.Go(func() (err error) {
defer func() {
if err == nil {
connectedFuse.Connected()
}
}()
if classicTunnel.UseReconnectToken && connectedFuse.IsConnected() {
err := h.reconnectTunnel(ctx, credentialManager, classicTunnel, registrationOptions)
if err == nil {
return nil
}
// log errors and proceed to RegisterTunnel
h.observer.log.Err(err).
Uint8(LogFieldConnIndex, h.connIndex).
Msg("Couldn't reconnect connection. Re-registering it instead.")
}
return h.registerTunnel(ctx, credentialManager, classicTunnel, registrationOptions)
})
errGroup.Go(func() error {
h.controlLoop(serveCtx, connectedFuse, false)
return nil
})
err := errGroup.Wait()
if err == errMuxerStopped {
if h.stoppedGracefully {
return nil
}
h.observer.log.Info().Uint8(LogFieldConnIndex, h.connIndex).Msg("Unexpected muxer shutdown")
}
return err
}
func (h *h2muxConnection) serveMuxer(ctx context.Context) error {
// All routines should stop when muxer finish serving. When muxer is shutdown
// gracefully, it doesn't return an error, so we need to return errMuxerShutdown
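The ServeClassicTunnel hunk above is built on the errgroup fan-out pattern: serveMuxer, tunnel registration, and the control loop each run in their own goroutine, and the first error cancels the rest. If that pattern is unfamiliar, here is a standalone illustration; it is a generic example, not cloudflared code.

```go
// Generic errgroup sketch: the first goroutine to fail cancels the shared
// context, and Wait returns that first error.
package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	errGroup, ctx := errgroup.WithContext(context.Background())

	errGroup.Go(func() error {
		<-ctx.Done() // runs until another goroutine in the group fails
		return nil
	})
	errGroup.Go(func() error {
		return errors.New("registration failed") // cancels ctx for the others
	})

	fmt.Println("group finished with:", errGroup.Wait())
}
```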

View File

@@ -71,8 +71,9 @@ type Ingress struct {
}
// NewSingleOrigin constructs an Ingress set with only one rule, constructed from
// CLI parameters for quick tunnels like --url or --no-chunked-encoding.
// legacy CLI parameters like --url or --no-chunked-encoding.
func NewSingleOrigin(c *cli.Context, allowURLFromArgs bool) (Ingress, error) {
service, err := parseSingleOriginService(c, allowURLFromArgs)
if err != nil {
return Ingress{}, err

View File

@@ -14,9 +14,11 @@ import (
"github.com/cloudflare/cloudflared/connection"
"github.com/cloudflare/cloudflared/edgediscovery"
"github.com/cloudflare/cloudflared/h2mux"
"github.com/cloudflare/cloudflared/orchestration"
"github.com/cloudflare/cloudflared/retry"
"github.com/cloudflare/cloudflared/signal"
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
"github.com/cloudflare/cloudflared/tunnelstate"
)
@@ -55,6 +57,7 @@ type Supervisor struct {
logTransport *zerolog.Logger
reconnectCredentialManager *reconnectCredentialManager
useReconnectToken bool
reconnectCh chan ReconnectSignal
gracefulShutdownC <-chan struct{}
@@ -105,6 +108,11 @@ func NewSupervisor(config *TunnelConfig, orchestrator *orchestration.Orchestrato
connAwareLogger: log,
}
useReconnectToken := false
if config.ClassicTunnel != nil {
useReconnectToken = config.ClassicTunnel.UseReconnectToken
}
return &Supervisor{
cloudflaredUUID: cloudflaredUUID,
config: config,
@@ -117,6 +125,7 @@ func NewSupervisor(config *TunnelConfig, orchestrator *orchestration.Orchestrato
log: log,
logTransport: config.LogTransport,
reconnectCredentialManager: reconnectCredentialManager,
useReconnectToken: useReconnectToken,
reconnectCh: reconnectCh,
gracefulShutdownC: gracefulShutdownC,
}, nil
@@ -150,6 +159,20 @@ func (s *Supervisor) Run(
backoff := retry.BackoffHandler{MaxRetries: s.config.Retries, BaseTime: tunnelRetryDuration, RetryForever: true}
var backoffTimer <-chan time.Time
refreshAuthBackoff := &retry.BackoffHandler{MaxRetries: refreshAuthMaxBackoff, BaseTime: refreshAuthRetryDuration, RetryForever: true}
var refreshAuthBackoffTimer <-chan time.Time
if s.useReconnectToken {
if timer, err := s.reconnectCredentialManager.RefreshAuth(ctx, refreshAuthBackoff, s.authenticate); err == nil {
refreshAuthBackoffTimer = timer
} else {
s.log.Logger().Err(err).
Dur("refreshAuthRetryDuration", refreshAuthRetryDuration).
Msgf("supervisor: initial refreshAuth failed, retrying in %v", refreshAuthRetryDuration)
refreshAuthBackoffTimer = time.After(refreshAuthRetryDuration)
}
}
shuttingDown := false
for {
select {
@@ -196,6 +219,16 @@ func (s *Supervisor) Run(
}
tunnelsActive += len(tunnelsWaiting)
tunnelsWaiting = nil
// Time to call Authenticate
case <-refreshAuthBackoffTimer:
newTimer, err := s.reconnectCredentialManager.RefreshAuth(ctx, refreshAuthBackoff, s.authenticate)
if err != nil {
s.log.Logger().Err(err).Msg("supervisor: Authentication failed")
// Permanent failure. Leave the `select` without setting the
// channel to be non-null, so we'll never hit this case of the `select` again.
continue
}
refreshAuthBackoffTimer = newTimer
// Tunnel successfully connected
case <-s.nextConnectedSignal:
if !s.waitForNextTunnel(s.nextConnectedIndex) && len(tunnelsWaiting) == 0 {
@@ -344,3 +377,44 @@ func (s *Supervisor) waitForNextTunnel(index int) bool {
func (s *Supervisor) unusedIPs() bool {
return s.edgeIPs.AvailableAddrs() > s.config.HAConnections
}
func (s *Supervisor) authenticate(ctx context.Context, numPreviousAttempts int) (tunnelpogs.AuthOutcome, error) {
arbitraryEdgeIP, err := s.edgeIPs.GetAddrForRPC()
if err != nil {
return nil, err
}
edgeConn, err := edgediscovery.DialEdge(ctx, dialTimeout, s.config.EdgeTLSConfigs[connection.H2mux], arbitraryEdgeIP.TCP)
if err != nil {
return nil, err
}
defer edgeConn.Close()
handler := h2mux.MuxedStreamFunc(func(*h2mux.MuxedStream) error {
// This callback is invoked by h2mux when the edge initiates a stream.
return nil // noop
})
muxerConfig := s.config.MuxerConfig.H2MuxerConfig(handler, s.logTransport)
muxer, err := h2mux.Handshake(edgeConn, edgeConn, *muxerConfig, h2mux.ActiveStreams)
if err != nil {
return nil, err
}
go muxer.Serve(ctx)
defer func() {
// If we don't wait for the muxer shutdown here, edgeConn.Close() runs before the muxer connections are done,
// and the user sees log noise: "error writing data", "connection closed unexpectedly"
<-muxer.Shutdown()
}()
stream, err := muxer.OpenRPCStream(ctx)
if err != nil {
return nil, err
}
rpcClient := connection.NewTunnelServerClient(ctx, stream, s.log.Logger())
defer rpcClient.Close()
const arbitraryConnectionID = uint8(0)
registrationOptions := s.config.registrationOptions(arbitraryConnectionID, edgeConn.LocalAddr().String(), s.cloudflaredUUID)
registrationOptions.NumPreviousAttempts = uint8(numPreviousAttempts)
return rpcClient.Authenticate(ctx, s.config.ClassicTunnel, registrationOptions)
}
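The refreshAuthBackoffTimer channel in the Run hunk above is only assigned when reconnect tokens are in use; otherwise it stays nil, and in Go a receive from a nil channel blocks forever, so that select case simply never fires. A minimal standalone illustration of the idiom (not cloudflared code):

```go
// Nil-channel select sketch: the "disabled" case can never be selected.
package main

import (
	"fmt"
	"time"
)

func main() {
	var disabled <-chan time.Time // nil channel: receiving blocks forever
	tick := time.After(10 * time.Millisecond)

	select {
	case <-disabled:
		fmt.Println("never reached")
	case <-tick:
		fmt.Println("tick fired; the nil-channel case stayed dormant")
	}
}
```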

View File

@@ -68,6 +68,7 @@ type TunnelConfig struct {
PQKexIdx int
NamedTunnel *connection.NamedTunnelProperties
ClassicTunnel *connection.ClassicTunnelProperties
MuxerConfig *connection.MuxerConfig
ProtocolSelector connection.ProtocolSelector
EdgeTLSConfigs map[connection.Protocol]*tls.Config
@@ -576,8 +577,12 @@ func (e *EdgeTunnelServer) serveH2mux(
errGroup, serveCtx := errgroup.WithContext(ctx)
errGroup.Go(func() error {
if e.config.NamedTunnel != nil {
connOptions := e.config.connectionOptions(edgeConn.LocalAddr().String(), uint8(connectedFuse.backoff.Retries()))
return handler.ServeNamedTunnel(serveCtx, e.config.NamedTunnel, connOptions, connectedFuse)
}
registrationOptions := e.config.registrationOptions(connIndex, edgeConn.LocalAddr().String(), e.cloudflaredUUID)
return handler.ServeClassicTunnel(serveCtx, e.config.ClassicTunnel, e.credentialManager, registrationOptions, connectedFuse)
})
errGroup.Go(func() error {