TUN-3471: Add structured log context to logs
parent abab78730d
commit 55bf904689
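The change applied throughout this diff is mechanical: format-string logging via Msgf is replaced with zerolog's typed field API, shared field names are hoisted into LogField... constants, and scopes that tag every message with the same fields derive a sub-logger via With(). A minimal, self-contained sketch of the pattern — the error and URL values here are hypothetical; only the LogFieldOriginURL constant is taken from the diff below:

package main

import (
	"errors"
	"os"

	"github.com/rs/zerolog"
)

// Declaring field names once keeps the JSON key consistent across call sites.
const LogFieldOriginURL = "originURL"

func main() {
	log := zerolog.New(os.Stderr).With().Timestamp().Logger()
	err := errors.New("dial tcp: connection refused") // hypothetical error
	originURL := "https://origin.example.com"         // hypothetical value

	// Before: context is baked into an unstructured format string.
	log.Error().Msgf("failed to connect to %s with error: %s", originURL, err)

	// After: the error and the origin URL become dedicated JSON fields.
	log.Err(err).Str(LogFieldOriginURL, originURL).Msg("failed to connect to origin")

	// For a scope that repeats the same fields, derive a sub-logger once.
	connLog := log.With().Str(LogFieldOriginURL, originURL).Logger()
	connLog.Info().Msg("connection established")
}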
@@ -17,6 +17,8 @@ import (
 	"github.com/rs/zerolog"
 )
 
+const LogFieldOriginURL = "originURL"
+
 type StartOptions struct {
 	OriginURL string
 	Headers   http.Header

@@ -49,7 +49,7 @@ func NewWSConnection(log *zerolog.Logger, isSocks bool) Connection {
 func (ws *Websocket) ServeStream(options *StartOptions, conn io.ReadWriter) error {
 	wsConn, err := createWebsocketStream(options, ws.log)
 	if err != nil {
-		ws.log.Error().Msgf("failed to connect to %s with error: %s", options.OriginURL, err)
+		ws.log.Err(err).Str(LogFieldOriginURL, options.OriginURL).Msg("failed to connect to origin")
 		return err
 	}
 	defer wsConn.Close()

@@ -15,6 +15,10 @@ import (
 	"github.com/urfave/cli/v2"
 )
 
+const (
+	LogFieldHost = "host"
+)
+
 // StartForwarder starts a client side websocket forward
 func StartForwarder(forwarder config.Forwarder, shutdown <-chan struct{}, log *zerolog.Logger) error {
 	validURL, err := validation.ValidateUrl(forwarder.Listener)

@@ -44,7 +48,7 @@ func StartForwarder(forwarder config.Forwarder, shutdown <-chan struct{}, log *zerolog.Logger) error {
 	// we could add a cmd line variable for this bool if we want the SOCK5 server to be on the client side
 	wsConn := carrier.NewWSConnection(log, false)
 
-	log.Info().Msgf("Start Websocket listener on: %s", validURL.Host)
+	log.Info().Str(LogFieldHost, validURL.Host).Msg("Start Websocket listener")
 	return carrier.StartForwarder(wsConn, validURL.Host, shutdown, options)
 }
 

@@ -88,14 +92,14 @@ func ssh(c *cli.Context) error {
 	if c.NArg() > 0 || c.IsSet(sshURLFlag) {
 		forwarder, err := config.ValidateUrl(c, true)
 		if err != nil {
-			log.Error().Msgf("Error validating origin URL: %s", err)
+			log.Err(err).Msg("Error validating origin URL")
 			return errors.Wrap(err, "error validating origin URL")
 		}
 
-		log.Info().Msgf("Start Websocket listener on: %s", forwarder.Host)
+		log.Info().Str(LogFieldHost, forwarder.Host).Msg("Start Websocket listener")
 		err = carrier.StartForwarder(wsConn, forwarder.Host, shutdownC, options)
 		if err != nil {
-			log.Error().Msgf("Error on Websocket listener: %s", err)
+			log.Err(err).Msg("Error on Websocket listener")
 		}
 		return err
 	}

@@ -212,11 +212,11 @@ func login(c *cli.Context) error {
 	rawURL := ensureURLScheme(args.First())
 	appURL, err := url.Parse(rawURL)
 	if args.Len() < 1 || err != nil {
-		log.Error().Msgf("Please provide the url of the Access application\n")
+		log.Error().Msg("Please provide the url of the Access application")
 		return err
 	}
 	if err := verifyTokenAtEdge(appURL, c, log); err != nil {
-		log.Error().Msgf("Could not verify token: %s", err)
+		log.Err(err).Msg("Could not verify token")
 		return err
 	}
 

@@ -270,7 +270,7 @@ func curl(c *cli.Context) error {
 		}
 		tok, err = token.FetchToken(appURL, log)
 		if err != nil {
-			log.Error().Msgf("Failed to refresh token: %s", err)
+			log.Err(err).Msg("Failed to refresh token")
 			return err
 		}
 	}

@@ -7,8 +7,13 @@ import (
 	"github.com/rs/zerolog"
 )
 
-// ResolverServiceType is used to identify what kind of overwatch service this is
-const ResolverServiceType = "resolver"
+const (
+	// ResolverServiceType is used to identify what kind of overwatch service this is
+	ResolverServiceType = "resolver"
+
+	LogFieldResolverAddress = "resolverAddress"
+	LogFieldResolverPort    = "resolverPort"
+)
 
 // ResolverService is used to wrap the tunneldns package's DNS over HTTP
 // into a service model for the overwatch package.

@@ -65,10 +70,16 @@ func (s *ResolverService) Run() error {
 		return err
 	}
 	<-readySignal
-	s.log.Info().Msgf("start resolver on: %s:%d", s.resolver.AddressOrDefault(), s.resolver.PortOrDefault())
+
+	resolverLog := s.log.With().
+		Str(LogFieldResolverAddress, s.resolver.AddressOrDefault()).
+		Uint16(LogFieldResolverPort, s.resolver.PortOrDefault()).
+		Logger()
+
+	resolverLog.Info().Msg("Starting resolver")
 
 	// wait for shutdown signal
 	<-s.shutdown
-	s.log.Info().Msgf("shutdown on: %s:%d", s.resolver.AddressOrDefault(), s.resolver.PortOrDefault())
+	resolverLog.Info().Msg("Shutting down resolver")
 	return l.Stop()
 }

@@ -99,7 +99,7 @@ func readConfigFromPath(configPath string, log *zerolog.Logger) (Root, error) {
 func (m *FileManager) WatcherItemDidChange(filepath string) {
 	config, err := m.GetConfig()
 	if err != nil {
-		m.log.Error().Msgf("Failed to read new config: %s", err)
+		m.log.Err(err).Msg("Failed to read new config")
 		return
 	}
 	m.log.Info().Msg("Config file has been updated")

@@ -108,5 +108,5 @@ func (m *FileManager) WatcherItemDidChange(filepath string) {
 
 // WatcherDidError notifies of errors with the file watcher
 func (m *FileManager) WatcherDidError(err error) {
-	m.log.Error().Msgf("Config watcher encountered an error: %s", err)
+	m.log.Err(err).Msg("Config watcher encountered an error")
 }

@@ -230,7 +230,7 @@ func installLinuxService(c *cli.Context) error {
 	userConfigFile := filepath.Base(c.String("config"))
 	userCredentialFile := config.DefaultCredentialFile
 	if err = copyUserConfiguration(userConfigDir, userConfigFile, userCredentialFile, log); err != nil {
-		log.Error().Msgf("Failed to copy user configuration: %s. Before running the service, ensure that %s contains two files, %s and %s", err,
+		log.Err(err).Msgf("Failed to copy user configuration. Before running the service, ensure that %s contains two files, %s and %s",
 			serviceConfigDir, serviceCredentialFile, serviceConfigFile)
 		return err
 	}

@@ -283,30 +283,30 @@ func installSystemd(templateArgs *ServiceTemplateArgs, log *zerolog.Logger) error {
 	for _, serviceTemplate := range systemdTemplates {
 		err := serviceTemplate.Generate(templateArgs)
 		if err != nil {
-			log.Error().Msgf("error generating service template: %s", err)
+			log.Err(err).Msg("error generating service template")
 			return err
 		}
 	}
 	if err := runCommand("systemctl", "enable", "cloudflared.service"); err != nil {
-		log.Error().Msgf("systemctl enable cloudflared.service error: %s", err)
+		log.Err(err).Msg("systemctl enable cloudflared.service error")
 		return err
 	}
 	if err := runCommand("systemctl", "start", "cloudflared-update.timer"); err != nil {
-		log.Error().Msgf("systemctl start cloudflared-update.timer error: %s", err)
+		log.Err(err).Msg("systemctl start cloudflared-update.timer error")
 		return err
 	}
-	log.Info().Msgf("systemctl daemon-reload")
+	log.Info().Msg("systemctl daemon-reload")
 	return runCommand("systemctl", "daemon-reload")
 }
 
 func installSysv(templateArgs *ServiceTemplateArgs, log *zerolog.Logger) error {
 	confPath, err := sysvTemplate.ResolvePath()
 	if err != nil {
-		log.Error().Msgf("error resolving system path: %s", err)
+		log.Err(err).Msg("error resolving system path")
 		return err
 	}
 	if err := sysvTemplate.Generate(templateArgs); err != nil {
-		log.Error().Msgf("error generating system template: %s", err)
+		log.Err(err).Msg("error generating system template")
 		return err
 	}
 	for _, i := range [...]string{"2", "3", "4", "5"} {

@@ -327,36 +327,36 @@ func uninstallLinuxService(c *cli.Context) error {
 
 	switch {
 	case isSystemd():
-		log.Info().Msgf("Using Systemd")
+		log.Info().Msg("Using Systemd")
 		return uninstallSystemd(log)
 	default:
-		log.Info().Msgf("Using SysV")
+		log.Info().Msg("Using SysV")
 		return uninstallSysv(log)
 	}
 }
 
 func uninstallSystemd(log *zerolog.Logger) error {
 	if err := runCommand("systemctl", "disable", "cloudflared.service"); err != nil {
-		log.Error().Msgf("systemctl disable cloudflared.service error: %s", err)
+		log.Err(err).Msg("systemctl disable cloudflared.service error")
 		return err
 	}
 	if err := runCommand("systemctl", "stop", "cloudflared-update.timer"); err != nil {
-		log.Error().Msgf("systemctl stop cloudflared-update.timer error: %s", err)
+		log.Err(err).Msg("systemctl stop cloudflared-update.timer error")
 		return err
 	}
 	for _, serviceTemplate := range systemdTemplates {
 		if err := serviceTemplate.Remove(); err != nil {
-			log.Error().Msgf("error removing service template: %s", err)
+			log.Err(err).Msg("error removing service template")
 			return err
 		}
 	}
-	log.Info().Msgf("Successfully uninstall cloudflared service")
+	log.Info().Msgf("Successfully uninstalled cloudflared service from systemd")
 	return nil
 }
 
 func uninstallSysv(log *zerolog.Logger) error {
 	if err := sysvTemplate.Remove(); err != nil {
-		log.Error().Msgf("error removing service template: %s", err)
+		log.Err(err).Msg("error removing service template")
 		return err
 	}
 	for _, i := range [...]string{"2", "3", "4", "5"} {

@@ -369,6 +369,6 @@ func uninstallSysv(log *zerolog.Logger) error {
 			continue
 		}
 	}
-	log.Info().Msgf("Successfully uninstall cloudflared service")
+	log.Info().Msgf("Successfully uninstalled cloudflared service from sysv")
 	return nil
 }

@@ -110,44 +110,44 @@ func installLaunchd(c *cli.Context) error {
 	log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
 
 	if isRootUser() {
-		log.Info().Msgf("Installing Argo Tunnel client as a system launch daemon. " +
+		log.Info().Msg("Installing Argo Tunnel client as a system launch daemon. " +
 			"Argo Tunnel client will run at boot")
 	} else {
-		log.Info().Msgf("Installing Argo Tunnel client as an user launch agent. " +
+		log.Info().Msg("Installing Argo Tunnel client as an user launch agent. " +
 			"Note that Argo Tunnel client will only run when the user is logged in. " +
 			"If you want to run Argo Tunnel client at boot, install with root permission. " +
 			"For more information, visit https://developers.cloudflare.com/argo-tunnel/reference/service/")
 	}
 	etPath, err := os.Executable()
 	if err != nil {
-		log.Error().Msgf("Error determining executable path: %s", err)
+		log.Err(err).Msg("Error determining executable path")
 		return fmt.Errorf("Error determining executable path: %v", err)
 	}
 	installPath, err := installPath()
 	if err != nil {
-		log.Error().Msgf("Error determining install path: %s", err)
+		log.Err(err).Msg("Error determining install path")
 		return errors.Wrap(err, "Error determining install path")
 	}
 	stdoutPath, err := stdoutPath()
 	if err != nil {
-		log.Error().Msgf("error determining stdout path: %s", err)
+		log.Err(err).Msg("error determining stdout path")
 		return errors.Wrap(err, "error determining stdout path")
 	}
 	stderrPath, err := stderrPath()
 	if err != nil {
-		log.Error().Msgf("error determining stderr path: %s", err)
+		log.Err(err).Msg("error determining stderr path")
 		return errors.Wrap(err, "error determining stderr path")
 	}
 	launchdTemplate := newLaunchdTemplate(installPath, stdoutPath, stderrPath)
 	templateArgs := ServiceTemplateArgs{Path: etPath}
 	err = launchdTemplate.Generate(&templateArgs)
 	if err != nil {
-		log.Error().Msgf("error generating launchd template: %s", err)
+		log.Err(err).Msg("error generating launchd template")
 		return err
 	}
 	plistPath, err := launchdTemplate.ResolvePath()
 	if err != nil {
-		log.Error().Msgf("error resolving launchd template path: %s", err)
+		log.Err(err).Msg("error resolving launchd template path")
 		return err
 	}
 

@@ -159,9 +159,9 @@ func uninstallLaunchd(c *cli.Context) error {
 	log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
 
 	if isRootUser() {
-		log.Info().Msgf("Uninstalling Argo Tunnel as a system launch daemon")
+		log.Info().Msg("Uninstalling Argo Tunnel as a system launch daemon")
 	} else {
-		log.Info().Msgf("Uninstalling Argo Tunnel as an user launch agent")
+		log.Info().Msg("Uninstalling Argo Tunnel as an user launch agent")
 	}
 	installPath, err := installPath()
 	if err != nil {

@@ -178,12 +178,12 @@ func uninstallLaunchd(c *cli.Context) error {
 	launchdTemplate := newLaunchdTemplate(installPath, stdoutPath, stderrPath)
 	plistPath, err := launchdTemplate.ResolvePath()
 	if err != nil {
-		log.Error().Msgf("error resolving launchd template path: %s", err)
+		log.Err(err).Msg("error resolving launchd template path")
 		return err
 	}
 	err = runCommand("launchctl", "unload", plistPath)
 	if err != nil {
-		log.Error().Msgf("error unloading: %s", err)
+		log.Err(err).Msg("error unloading")
 		return err
 	}
 

@@ -189,28 +189,28 @@ func handleServiceMode(c *cli.Context, shutdownC chan struct{}) error {
 	// start the main run loop that reads from the config file
 	f, err := watcher.NewFile()
 	if err != nil {
-		log.Error().Msgf("Cannot load config file: %s", err)
+		log.Err(err).Msg("Cannot load config file")
 		return err
 	}
 
 	configPath := config.FindOrCreateConfigPath()
 	configManager, err := config.NewFileManager(f, configPath, log)
 	if err != nil {
-		log.Error().Msgf("Cannot setup config file for monitoring: %s", err)
+		log.Err(err).Msg("Cannot setup config file for monitoring")
 		return err
 	}
 	log.Info().Msgf("monitoring config file at: %s", configPath)
 
 	serviceCallback := func(t string, name string, err error) {
 		if err != nil {
-			log.Error().Msgf("%s service: %s encountered an error: %s", t, name, err)
+			log.Err(err).Msgf("%s service: %s encountered an error", t, name)
 		}
 	}
 	serviceManager := overwatch.NewAppManager(serviceCallback)
 
 	appService := NewAppService(configManager, serviceManager, shutdownC, log)
 	if err := appService.Run(); err != nil {
-		log.Error().Msgf("Failed to start app service: %s", err)
+		log.Err(err).Msg("Failed to start app service")
 		return err
 	}
 	return nil

@@ -106,7 +106,7 @@ func transferRequest(requestURL string, log *zerolog.Logger) ([]byte, string, error) {
 		return nil, "", err
 	} else if len(buf) > 0 {
 		if err := putSuccess(client, requestURL); err != nil {
-			log.Error().Msgf("Failed to update resource success: %s", err)
+			log.Err(err).Msg("Failed to update resource success")
 		}
 		return buf, key, nil
 	}

@@ -78,6 +78,12 @@ const (
 
 	debugLevelWarning = "At debug level, request URL, method, protocol, content legnth and header will be logged. " +
 		"Response status, content length and header will also be logged in debug level."
+
+	LogFieldCommand             = "command"
+	LogFieldExpandedPath        = "expandedPath"
+	LogFieldPIDPathname         = "pidPathname"
+	LogFieldTmpTraceFilename    = "tmpTraceFilename"
+	LogFieldTraceOutputFilepath = "traceOutputFilepath"
 )
 
 var (

@@ -173,14 +179,14 @@ func runAdhocNamedTunnel(sc *subcommandContext, name string) error {
 			return errors.Wrap(err, "failed to create tunnel")
 		}
 	} else {
-		sc.log.Info().Msgf("Tunnel already created with ID %s", tunnel.ID)
+		sc.log.Info().Str(LogFieldTunnelID, tunnel.ID.String()).Msg("Reusing existing tunnel with this name")
 	}
 
 	if r, ok := routeFromFlag(sc.c); ok {
 		if res, err := sc.route(tunnel.ID, r); err != nil {
-			sc.log.Error().Msgf("failed to create route, please create it manually. err: %v.", err)
+			sc.log.Err(err).Msg("failed to create route, please create it manually")
 		} else {
-			sc.log.Info().Msgf(res.SuccessSummary())
+			sc.log.Info().Msg(res.SuccessSummary())
 		}
 	}
 

@@ -229,25 +235,31 @@ func StartServer(
 	if c.IsSet("trace-output") {
 		tmpTraceFile, err := ioutil.TempFile("", "trace")
 		if err != nil {
-			log.Error().Msgf("Failed to create new temporary file to save trace output: %s", err)
+			log.Err(err).Msg("Failed to create new temporary file to save trace output")
 		}
 
+		traceLog := log.With().Str(LogFieldTmpTraceFilename, tmpTraceFile.Name()).Logger()
+
 		defer func() {
 			if err := tmpTraceFile.Close(); err != nil {
-				log.Error().Msgf("Failed to close trace output file %s with error: %s", tmpTraceFile.Name(), err)
+				traceLog.Err(err).Msg("Failed to close temporary trace output file")
 			}
-			if err := os.Rename(tmpTraceFile.Name(), c.String("trace-output")); err != nil {
-				log.Error().Msgf("Failed to rename temporary trace output file %s to %s with error: %s", tmpTraceFile.Name(), c.String("trace-output"), err)
+			traceOutputFilepath := c.String("trace-output")
+			if err := os.Rename(tmpTraceFile.Name(), traceOutputFilepath); err != nil {
+				traceLog.
+					Err(err).
+					Str(LogFieldTraceOutputFilepath, traceOutputFilepath).
+					Msg("Failed to rename temporary trace output file")
 			} else {
 				err := os.Remove(tmpTraceFile.Name())
 				if err != nil {
-					log.Error().Msgf("Failed to remove the temporary trace file %s with error: %s", tmpTraceFile.Name(), err)
+					traceLog.Err(err).Msg("Failed to remove the temporary trace file")
 				}
 			}
 		}()
 
 		if err := trace.Start(tmpTraceFile); err != nil {
-			log.Error().Msgf("Failed to start trace: %s", err)
+			traceLog.Err(err).Msg("Failed to start trace")
 			return errors.Wrap(err, "Error starting tracing")
 		}
 		defer trace.Stop()

@@ -277,7 +289,7 @@ func StartServer(
 
 	cloudflaredID, err := uuid.NewRandom()
 	if err != nil {
-		log.Error().Msgf("Cannot generate cloudflared ID: %s", err)
+		log.Err(err).Msg("Cannot generate cloudflared ID")
 		return err
 	}
 

@@ -289,7 +301,8 @@ func StartServer(
 
 	// update needs to be after DNS proxy is up to resolve equinox server address
 	if updater.IsAutoupdateEnabled(c, log) {
-		log.Info().Msgf("Autoupdate frequency is set to %v", c.Duration("autoupdate-freq"))
+		autoupdateFreq := c.Duration("autoupdate-freq")
+		log.Info().Dur("autoupdateFreq", autoupdateFreq).Msg("Autoupdate frequency is set")
 		wg.Add(1)
 		go func() {
 			defer wg.Done()

@@ -323,13 +336,13 @@ func StartServer(
 	}
 	tunnelConfig, ingressRules, err := prepareTunnelConfig(c, buildInfo, version, log, transportLog, namedTunnel, isUIEnabled, eventChannels)
 	if err != nil {
-		log.Error().Msgf("Couldn't start tunnel: %v", err)
+		log.Err(err).Msg("Couldn't start tunnel")
 		return err
 	}
 
 	metricsListener, err := listeners.Listen("tcp", c.String("metrics"))
 	if err != nil {
-		log.Error().Msgf("Error opening metrics server listener: %s", err)
+		log.Err(err).Msg("Error opening metrics server listener")
 		return errors.Wrap(err, "Error opening metrics server listener")
 	}
 	defer metricsListener.Close()

@@ -404,7 +417,7 @@ func waitToShutdown(wg *sync.WaitGroup,
 	}
 
 	if err != nil {
-		log.Error().Msgf("Quitting due to error: %s", err)
+		log.Err(err).Msg("Quitting due to error")
 	} else {
 		log.Info().Msg("Quitting...")
 	}

@@ -422,16 +435,16 @@ func notifySystemd(waitForSignal *signal.Signal) {
 	daemon.SdNotify(false, "READY=1")
 }
 
-func writePidFile(waitForSignal *signal.Signal, pidFile string, log *zerolog.Logger) {
+func writePidFile(waitForSignal *signal.Signal, pidPathname string, log *zerolog.Logger) {
 	<-waitForSignal.Wait()
-	expandedPath, err := homedir.Expand(pidFile)
+	expandedPath, err := homedir.Expand(pidPathname)
 	if err != nil {
-		log.Error().Msgf("Unable to expand %s, try to use absolute path in --pidfile: %s", pidFile, err)
+		log.Err(err).Str(LogFieldPIDPathname, pidPathname).Msg("Unable to expand the path, try to use absolute path in --pidfile")
 		return
 	}
 	file, err := os.Create(expandedPath)
 	if err != nil {
-		log.Error().Msgf("Unable to write pid to %s: %s", expandedPath, err)
+		log.Err(err).Str(LogFieldExpandedPath, expandedPath).Msg("Unable to write pid")
 		return
 	}
 	defer file.Close()

@@ -991,9 +1004,14 @@ func configureProxyDNSFlags(shouldHide bool) []cli.Flag {
 			Hidden: shouldHide,
 		}),
 		altsrc.NewStringSliceFlag(&cli.StringSliceFlag{
-			Name:    "proxy-dns-bootstrap",
-			Usage:   "bootstrap endpoint URL, you can specify multiple endpoints for redundancy.",
-			Value:   cli.NewStringSlice("https://162.159.36.1/dns-query", "https://162.159.46.1/dns-query", "https://[2606:4700:4700::1111]/dns-query", "https://[2606:4700:4700::1001]/dns-query"),
+			Name:  "proxy-dns-bootstrap",
+			Usage: "bootstrap endpoint URL, you can specify multiple endpoints for redundancy.",
+			Value: cli.NewStringSlice(
+				"https://162.159.36.1/dns-query",
+				"https://162.159.46.1/dns-query",
+				"https://[2606:4700:4700::1111]/dns-query",
+				"https://[2606:4700:4700::1001]/dns-query",
+			),
 			EnvVars: []string{"TUNNEL_DNS_BOOTSTRAP"},
 			Hidden:  shouldHide,
 		}),

@@ -1022,7 +1040,7 @@ func stdinControl(reconnectCh chan origin.ReconnectSignal, log *zerolog.Logger) {
 			log.Info().Msgf("Sending reconnect signal %+v", reconnect)
 			reconnectCh <- reconnect
 		default:
-			log.Info().Msgf("Unknown command: %s", command)
+			log.Info().Str(LogFieldCommand, command).Msg("Unknown command")
 			fallthrough
 		case "help":
 			log.Info().Msg(`Supported command:

@@ -27,11 +27,15 @@ import (
 	"golang.org/x/crypto/ssh/terminal"
 )
 
+const LogFieldOriginCertPath = "originCertPath"
+
 var (
 	developerPortal = "https://developers.cloudflare.com/argo-tunnel"
 	quickStartUrl   = developerPortal + "/quickstart/quickstart/"
 	serviceUrl      = developerPortal + "/reference/service/"
 	argumentsUrl    = developerPortal + "/reference/arguments/"
+
+	LogFieldHostname = "hostname"
 )
 
 // returns the first path that contains a cert.pem file. If none of the DefaultConfigSearchDirectories

@@ -92,29 +96,28 @@ func dnsProxyStandAlone(c *cli.Context) bool {
 	return c.IsSet("proxy-dns") && (!c.IsSet("hostname") && !c.IsSet("tag") && !c.IsSet("hello-world"))
 }
 
-func findOriginCert(c *cli.Context, log *zerolog.Logger) (string, error) {
-	originCertPath := c.String("origincert")
+func findOriginCert(originCertPath string, log *zerolog.Logger) (string, error) {
 	if originCertPath == "" {
 		log.Info().Msgf("Cannot determine default origin certificate path. No file %s in %v", config.DefaultCredentialFile, config.DefaultConfigSearchDirectories())
 		if isRunningFromTerminal() {
 			log.Error().Msgf("You need to specify the origin certificate path with --origincert option, or set TUNNEL_ORIGIN_CERT environment variable. See %s for more information.", argumentsUrl)
-			return "", fmt.Errorf("Client didn't specify origincert path when running from terminal")
+			return "", fmt.Errorf("client didn't specify origincert path when running from terminal")
 		} else {
 			log.Error().Msgf("You need to specify the origin certificate path by specifying the origincert option in the configuration file, or set TUNNEL_ORIGIN_CERT environment variable. See %s for more information.", serviceUrl)
-			return "", fmt.Errorf("Client didn't specify origincert path")
+			return "", fmt.Errorf("client didn't specify origincert path")
 		}
 	}
 	var err error
 	originCertPath, err = homedir.Expand(originCertPath)
 	if err != nil {
-		log.Error().Msgf("Cannot resolve path %s: %s", originCertPath, err)
-		return "", fmt.Errorf("Cannot resolve path %s", originCertPath)
+		log.Err(err).Msgf("Cannot resolve origin certificate path")
+		return "", fmt.Errorf("cannot resolve path %s", originCertPath)
 	}
 	// Check that the user has acquired a certificate using the login command
 	ok, err := config.FileExists(originCertPath)
 	if err != nil {
 		log.Error().Msgf("Cannot check if origin cert exists at path %s", originCertPath)
-		return "", fmt.Errorf("Cannot check if origin cert exists at path %s", originCertPath)
+		return "", fmt.Errorf("cannot check if origin cert exists at path %s", originCertPath)
 	}
 	if !ok {
 		log.Error().Msgf(`Cannot find a valid certificate for your origin at the path:

@@ -126,29 +129,26 @@ If you don't have a certificate signed by Cloudflare, run the command:
 
 	%s login
 `, originCertPath, os.Args[0])
-		return "", fmt.Errorf("Cannot find a valid certificate at the path %s", originCertPath)
+		return "", fmt.Errorf("cannot find a valid certificate at the path %s", originCertPath)
 	}
 
 	return originCertPath, nil
 }
 
-func readOriginCert(originCertPath string, log *zerolog.Logger) ([]byte, error) {
-	log.Debug().Msgf("Reading origin cert from %s", originCertPath)
-
+func readOriginCert(originCertPath string) ([]byte, error) {
 	// Easier to send the certificate as []byte via RPC than decoding it at this point
 	originCert, err := ioutil.ReadFile(originCertPath)
 	if err != nil {
-		log.Error().Msgf("Cannot read %s to load origin certificate: %s", originCertPath, err)
-		return nil, fmt.Errorf("Cannot read %s to load origin certificate", originCertPath)
+		return nil, fmt.Errorf("cannot read %s to load origin certificate", originCertPath)
 	}
 	return originCert, nil
 }
 
-func getOriginCert(c *cli.Context, log *zerolog.Logger) ([]byte, error) {
-	if originCertPath, err := findOriginCert(c, log); err != nil {
+func getOriginCert(originCertPath string, log *zerolog.Logger) ([]byte, error) {
+	if originCertPath, err := findOriginCert(originCertPath, log); err != nil {
 		return nil, err
 	} else {
-		return readOriginCert(originCertPath, log)
+		return readOriginCert(originCertPath)
 	}
 }
 

@@ -164,9 +164,10 @@ func prepareTunnelConfig(
 ) (*origin.TunnelConfig, ingress.Ingress, error) {
 	isNamedTunnel := namedTunnel != nil
 
-	hostname, err := validation.ValidateHostname(c.String("hostname"))
+	configHostname := c.String("hostname")
+	hostname, err := validation.ValidateHostname(configHostname)
 	if err != nil {
-		log.Error().Msgf("Invalid hostname: %s", err)
+		log.Err(err).Str(LogFieldHostname, configHostname).Msg("Invalid hostname")
 		return nil, ingress.Ingress{}, errors.Wrap(err, "Invalid hostname")
 	}
 	isFreeTunnel := hostname == ""

@@ -180,7 +181,7 @@ func prepareTunnelConfig(
 
 	tags, err := NewTagSliceFromCLI(c.StringSlice("tag"))
 	if err != nil {
-		log.Error().Msgf("Tag parse failure: %s", err)
+		log.Err(err).Msg("Tag parse failure")
 		return nil, ingress.Ingress{}, errors.Wrap(err, "Tag parse failure")
 	}
 

@@ -188,7 +189,12 @@ func prepareTunnelConfig(
 
 	var originCert []byte
 	if !isFreeTunnel {
-		originCert, err = getOriginCert(c, log)
+		originCertPath := c.String("origincert")
+		originCertLog := log.With().
+			Str(LogFieldOriginCertPath, originCertPath).
+			Logger()
+
+		originCert, err = getOriginCert(originCertPath, &originCertLog)
 		if err != nil {
 			return nil, ingress.Ingress{}, errors.Wrap(err, "Error getting origin cert")
 		}

@@ -56,9 +56,13 @@ func newSearchByID(id uuid.UUID, c *cli.Context, log *zerolog.Logger, fs fileSystem) searchByID {
 }
 
 func (s searchByID) Path() (string, error) {
+	originCertPath := s.c.String("origincert")
+	originCertLog := s.log.With().
+		Str(LogFieldOriginCertPath, originCertPath).
+		Logger()
 
 	// Fallback to look for tunnel credentials in the origin cert directory
-	if originCertPath, err := findOriginCert(s.c, s.log); err == nil {
+	if originCertPath, err := findOriginCert(originCertPath, &originCertLog); err == nil {
 		originCertDir := filepath.Dir(originCertPath)
 		if filePath, err := tunnelFilePath(s.id, originCertDir); err == nil {
 			if s.fs.validFilePath(filePath) {

@@ -75,5 +79,5 @@ func (s searchByID) Path() (string, error) {
 			}
 		}
 	}
-	return "", fmt.Errorf("Tunnel credentials file not found")
+	return "", fmt.Errorf("tunnel credentials file not found")
 }

@@ -9,6 +9,8 @@ import (
 	"github.com/rs/zerolog"
 )
 
+const LogFieldSignal = "signal"
+
 // waitForSignal notifies all routines to shutdownC immediately by closing the
 // shutdownC when one of the routines in main exits, or when this process receives
 // SIGTERM/SIGINT

@@ -19,11 +21,11 @@ func waitForSignal(errC chan error, shutdownC chan struct{}, log *zerolog.Logger) error {
 
 	select {
 	case err := <-errC:
-		log.Info().Msgf("terminating due to error: %v", err)
+		log.Err(err).Msg("terminating due to error")
 		close(shutdownC)
 		return err
 	case s := <-signals:
-		log.Info().Msgf("terminating due to signal %s", s)
+		log.Info().Str(LogFieldSignal, s.String()).Msg("terminating due to signal")
 		close(shutdownC)
 	case <-shutdownC:
 	}

@@ -93,11 +93,16 @@ func (sc *subcommandContext) client() (tunnelstore.Client, error) {
 
 func (sc *subcommandContext) credential() (*userCredential, error) {
 	if sc.userCredential == nil {
-		originCertPath, err := findOriginCert(sc.c, sc.log)
+		originCertPath := sc.c.String("origincert")
+		originCertLog := sc.log.With().
+			Str(LogFieldOriginCertPath, originCertPath).
+			Logger()
+
+		originCertPath, err := findOriginCert(originCertPath, &originCertLog)
 		if err != nil {
 			return nil, errors.Wrap(err, "Error locating origin cert")
 		}
-		blocks, err := readOriginCert(originCertPath, sc.log)
+		blocks, err := readOriginCert(originCertPath)
 		if err != nil {
 			return nil, errors.Wrapf(err, "Can't read origin cert from %s", originCertPath)
 		}

@@ -30,6 +30,8 @@ import (
 const (
 	CredFileFlagAlias = "cred-file"
 	CredFileFlag      = "credentials-file"
+
+	LogFieldTunnelID = "tunnelID"
 )
 
 var (

@@ -360,7 +362,7 @@ func runNamedTunnel(sc *subcommandContext, tunnelRef string) error {
 		return errors.Wrap(err, "error parsing tunnel ID")
 	}
 
-	sc.log.Info().Msgf("Starting tunnel %s", tunnelID.String())
+	sc.log.Info().Str(LogFieldTunnelID, tunnelID.String()).Msg("Starting tunnel")
 
 	return sc.run(tunnelID)
 }

@@ -518,7 +520,7 @@ func routeCommand(c *cli.Context) error {
 		return err
 	}
 
-	sc.log.Info().Msg(res.SuccessSummary())
+	sc.log.Info().Str(LogFieldTunnelID, tunnelID.String()).Msg(res.SuccessSummary())
 	return nil
 }
 

@@ -25,6 +25,8 @@ const (
 	isManagedInstallFile = ".installedFromPackageManager"
 	UpdateURL            = "https://update.argotunnel.com"
 	StagingUpdateURL     = "https://staging-update.argotunnel.com"
+
+	LogFieldVersion = "version"
 )
 
 var (

@@ -141,7 +143,7 @@ func Update(c *cli.Context) error {
 	}
 
 	if updateOutcome.noUpdate() {
-		log.Info().Msgf("cloudflared is up to date (%s)", updateOutcome.Version)
+		log.Info().Str(LogFieldVersion, updateOutcome.Version).Msg("cloudflared is up to date")
 		return nil
 	}
 

@@ -152,10 +154,10 @@ func Update(c *cli.Context) error {
 func loggedUpdate(log *zerolog.Logger, options updateOptions) UpdateOutcome {
 	updateOutcome := checkForUpdateAndApply(options)
 	if updateOutcome.Updated {
-		log.Info().Msgf("cloudflared has been updated to version %s", updateOutcome.Version)
+		log.Info().Str(LogFieldVersion, updateOutcome.Version).Msg("cloudflared has been updated")
 	}
 	if updateOutcome.Error != nil {
-		log.Error().Msgf("update check failed: %s", updateOutcome.Error)
+		log.Err(updateOutcome.Error).Msg("update check failed: %s")
 	}
 
 	return updateOutcome

@@ -203,7 +205,7 @@ func (a *AutoUpdater) Run(ctx context.Context) error {
 		a.log.Info().Msg("Restarting service managed by SysV...")
 		pid, err := a.listeners.StartProcess()
 		if err != nil {
-			a.log.Error().Msgf("Unable to restart server automatically: %s", err)
+			a.log.Err(err).Msg("Unable to restart server automatically")
 			return &statusErr{err: err}
 		}
 		// stop old process after autoupdate. Otherwise we create a new process

@@ -36,6 +36,8 @@ const (
 	// ERROR_FAILED_SERVICE_CONTROLLER_CONNECT
 	// https://docs.microsoft.com/en-us/windows/desktop/debug/system-error-codes--1000-1299-
 	serviceControllerConnectionFailure = 1063
+
+	LogFieldWindowsServiceName = "windowsServiceName"
 )
 
 func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) {

@@ -70,7 +72,7 @@ func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) {
 
 	isIntSess, err := svc.IsAnInteractiveSession()
 	if err != nil {
-		log.Fatal().Msgf("failed to determine if we are running in an interactive session: %v", err)
+		log.Fatal().Err(err).Msg("failed to determine if we are running in an interactive session")
 	}
 	if isIntSess {
 		app.Run(os.Args)

@@ -88,7 +90,7 @@ func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) {
 			app.Run(os.Args)
 			return
 		}
-		log.Fatal().Msgf("%s service failed: %v", windowsServiceName, err)
+		log.Fatal().Err(err).Msgf("%s service failed", windowsServiceName)
 	}
 }
 

@@ -103,7 +105,7 @@ func (s *windowsService) Execute(serviceArgs []string, r <-chan svc.ChangeRequest
 	log := logger.Create(nil)
 	elog, err := eventlog.Open(windowsServiceName)
 	if err != nil {
-		log.Error().Msgf("Cannot open event log for %s with error: %s", windowsServiceName, err)
+		log.Err(err).Msgf("Cannot open event log for %s", windowsServiceName)
 		return
 	}
 	defer elog.Close()

@@ -165,71 +167,74 @@ func (s *windowsService) Execute(serviceArgs []string, r <-chan svc.ChangeRequest
 func installWindowsService(c *cli.Context) error {
 	log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
 
-	log.Info().Msgf("Installing Argo Tunnel Windows service")
+	log.Info().Msg("Installing Argo Tunnel Windows service")
 	exepath, err := os.Executable()
 	if err != nil {
-		log.Error().Msgf("Cannot find path name that start the process")
+		log.Err(err).Msg("Cannot find path name that start the process")
 		return err
 	}
 	m, err := mgr.Connect()
 	if err != nil {
-		log.Error().Msgf("Cannot establish a connection to the service control manager: %s", err)
+		log.Err(err).Msg("Cannot establish a connection to the service control manager")
 		return err
 	}
 	defer m.Disconnect()
 	s, err := m.OpenService(windowsServiceName)
+	log = log.With().Str(LogFieldWindowsServiceName, windowsServiceName).Logger()
 	if err == nil {
 		s.Close()
-		log.Error().Msgf("service %s already exists", windowsServiceName)
+		log.Err(err).Msg("service already exists")
 		return fmt.Errorf("service %s already exists", windowsServiceName)
 	}
 	config := mgr.Config{StartType: mgr.StartAutomatic, DisplayName: windowsServiceDescription}
 	s, err = m.CreateService(windowsServiceName, exepath, config)
 	if err != nil {
-		log.Error().Msgf("Cannot install service %s", windowsServiceName)
+		log.Error().Msg("Cannot install service")
 		return err
 	}
 	defer s.Close()
-	log.Info().Msgf("Argo Tunnel agent service is installed")
+	log.Info().Msg("Argo Tunnel agent service is installed")
 	err = eventlog.InstallAsEventCreate(windowsServiceName, eventlog.Error|eventlog.Warning|eventlog.Info)
 	if err != nil {
 		s.Delete()
-		log.Error().Msgf("Cannot install event logger: %s", err)
+		log.Err(err).Msg("Cannot install event logger")
 		return fmt.Errorf("SetupEventLogSource() failed: %s", err)
 	}
 	err = configRecoveryOption(s.Handle)
 	if err != nil {
-		log.Error().Msgf("Cannot set service recovery actions: %s", err)
+		log.Err(err).Msg("Cannot set service recovery actions")
 		log.Info().Msgf("See %s to manually configure service recovery actions", windowsServiceUrl)
 	}
 	return nil
 }
 
 func uninstallWindowsService(c *cli.Context) error {
-	log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
-	log.Info().Msgf("Uninstalling Argo Tunnel Windows Service")
+	log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog).
+		With().
+		Str(LogFieldWindowsServiceName, windowsServiceName).Logger()
+
+	log.Info().Msg("Uninstalling Argo Tunnel Windows Service")
 	m, err := mgr.Connect()
 	if err != nil {
-		log.Error().Msgf("Cannot establish a connection to the service control manager")
+		log.Error().Msg("Cannot establish a connection to the service control manager")
 		return err
 	}
 	defer m.Disconnect()
 	s, err := m.OpenService(windowsServiceName)
 	if err != nil {
-		log.Error().Msgf("service %s is not installed", windowsServiceName)
+		log.Error().Msg("service is not installed")
 		return fmt.Errorf("service %s is not installed", windowsServiceName)
 	}
 	defer s.Close()
 	err = s.Delete()
 	if err != nil {
-		log.Error().Msgf("Cannot delete service %s", windowsServiceName)
+		log.Error().Msg("Cannot delete service")
 		return err
 	}
-	log.Info().Msgf("Argo Tunnel agent service is uninstalled")
+	log.Info().Msg("Argo Tunnel agent service is uninstalled")
 	err = eventlog.Remove(windowsServiceName)
 	if err != nil {
-		log.Error().Msgf("Cannot remove event logger")
+		log.Error().Msg("Cannot remove event logger")
 		return fmt.Errorf("RemoveEventLogSource() failed: %s", err)
 	}
 	return nil

@@ -11,9 +11,7 @@ import (
 	"github.com/google/uuid"
 )
 
-const (
-	lbProbeUserAgentPrefix = "Mozilla/5.0 (compatible; Cloudflare-Traffic-Manager/1.0; +https://www.cloudflare.com/traffic-manager/;"
-)
+const LogFieldConnIndex = "connIndex"
 
 type Config struct {
 	OriginClient OriginClient

@@ -63,13 +63,18 @@ func (e muxerShutdownError) Error() string {
 }
 
 func isHandshakeErrRecoverable(err error, connIndex uint8, observer *Observer) bool {
+	log := observer.log.With().
+		Uint8(LogFieldConnIndex, connIndex).
+		Err(err).
+		Logger()
+
 	switch err.(type) {
 	case edgediscovery.DialError:
-		observer.log.Error().Msgf("Connection %d unable to dial edge: %s", connIndex, err)
+		log.Error().Msg("Connection unable to dial edge")
 	case h2mux.MuxerHandshakeError:
-		observer.log.Error().Msgf("Connection %d handshake with edge server failed: %s", connIndex, err)
+		log.Error().Msg("Connection handshake with edge server failed")
 	default:
-		observer.log.Error().Msgf("Connection %d failed: %s", connIndex, err)
+		log.Error().Msg("Connection failed")
 		return false
 	}
 	return true

@@ -51,7 +51,7 @@ func (mc *MuxerConfig) H2MuxerConfig(h h2mux.MuxedStreamHandler, log *zerolog.Logger) *h2mux.MuxerConfig {
 }
 
 // NewTunnelHandler returns a TunnelHandler, origin LAN IP and error
-func NewH2muxConnection(ctx context.Context,
+func NewH2muxConnection(
 	config *Config,
 	muxerConfig *MuxerConfig,
 	edgeConn net.Conn,

@@ -123,7 +123,9 @@ func (h *h2muxConnection) ServeClassicTunnel(ctx context.Context, classicTunnel
 			return nil
 		}
 		// log errors and proceed to RegisterTunnel
-		h.observer.log.Error().Msgf("Couldn't reconnect connection %d. Reregistering it instead. Error was: %v", h.connIndex, err)
+		h.observer.log.Err(err).
+			Uint8(LogFieldConnIndex, h.connIndex).
+			Msg("Couldn't reconnect connection. Re-registering it instead.")
 		}
 		return h.registerTunnel(ctx, credentialManager, classicTunnel, registrationOptions)
 	})

@@ -26,7 +26,7 @@ var (
 	}
 )
 
-func newH2MuxConnection(ctx context.Context, t require.TestingT) (*h2muxConnection, *h2mux.Muxer) {
+func newH2MuxConnection(t require.TestingT) (*h2muxConnection, *h2mux.Muxer) {
 	edgeConn, originConn := net.Pipe()
 	edgeMuxChan := make(chan *h2mux.Muxer)
 	go func() {

@@ -38,7 +38,7 @@ func newH2MuxConnection(ctx context.Context, t require.TestingT) (*h2muxConnection, *h2mux.Muxer) {
 		edgeMuxChan <- edgeMux
 	}()
 	var connIndex = uint8(0)
-	h2muxConn, err, _ := NewH2muxConnection(ctx, testConfig, testMuxerConfig, originConn, connIndex, testObserver)
+	h2muxConn, err, _ := NewH2muxConnection(testConfig, testMuxerConfig, originConn, connIndex, testObserver)
 	require.NoError(t, err)
 	return h2muxConn, <-edgeMuxChan
 }

@@ -79,7 +79,7 @@ func TestServeStreamHTTP(t *testing.T) {
 	}
 
 	ctx, cancel := context.WithCancel(context.Background())
-	h2muxConn, edgeMux := newH2MuxConnection(ctx, t)
+	h2muxConn, edgeMux := newH2MuxConnection(t)
 
 	var wg sync.WaitGroup
 	wg.Add(2)

@@ -120,7 +120,7 @@ func TestServeStreamHTTP(t *testing.T) {
 
 func TestServeStreamWS(t *testing.T) {
 	ctx, cancel := context.WithCancel(context.Background())
-	h2muxConn, edgeMux := newH2MuxConnection(ctx, t)
+	h2muxConn, edgeMux := newH2MuxConnection(t)
 
 	var wg sync.WaitGroup
 	wg.Add(2)

@@ -179,7 +179,7 @@ func hasHeader(stream *h2mux.MuxedStream, name, val string) bool {
 
 func benchmarkServeStreamHTTPSimple(b *testing.B, test testRequest) {
 	ctx, cancel := context.WithCancel(context.Background())
-	h2muxConn, edgeMux := newH2MuxConnection(ctx, b)
+	h2muxConn, edgeMux := newH2MuxConnection(b)
 
 	var wg sync.WaitGroup
 	wg.Add(2)

@@ -2,7 +2,6 @@ package connection
 
 import (
 	"context"
-	"errors"
 	"io"
 	"math"
 	"net"

@@ -23,10 +22,6 @@ const (
 	controlStreamUpgrade = "control-stream"
 )
 
-var (
-	errNotFlusher = errors.New("ResponseWriter doesn't implement http.Flusher")
-)
-
 type http2Connection struct {
 	conn   net.Conn
 	server *http2.Server

@@ -380,7 +380,7 @@ func newTunnelMetrics() *tunnelMetrics {
 		serverLocations:    serverLocations,
 		oldServerLocations: make(map[string]string),
 		muxerMetrics:       newMuxerMetrics(),
-		tunnelsHA:          NewTunnelsForHA(),
+		tunnelsHA:          newTunnelsForHA(),
 		regSuccess:         registerSuccess,
 		regFail:            registerFail,
 		rpcFail:            rpcFail,

@@ -10,6 +10,8 @@ import (
 	"github.com/rs/zerolog"
 )
 
+const LogFieldLocation = "location"
+
 type Observer struct {
 	log     *zerolog.Logger
 	metrics *tunnelMetrics

@@ -28,7 +30,10 @@ func NewObserver(log *zerolog.Logger, tunnelEventChans []chan Event, uiEnabled bool) *Observer {
 
 func (o *Observer) logServerInfo(connIndex uint8, location, msg string) {
 	o.sendEvent(Event{Index: connIndex, EventType: Connected, Location: location})
-	o.log.Info().Msgf(msg)
+	o.log.Info().
+		Uint8(LogFieldConnIndex, connIndex).
+		Str(LogFieldLocation, location).
+		Msg(msg)
 	o.metrics.registerServerLocation(uint8ToString(connIndex), location)
 }
 

@@ -118,7 +118,7 @@ func (s *autoProtocolSelector) Current() Protocol {
 
 	percentage, err := s.fetchFunc()
 	if err != nil {
-		s.log.Error().Msgf("Failed to refresh protocol, err: %v", err)
+		s.log.Err(err).Msg("Failed to refresh protocol")
 		return s.current
 	}
 

@@ -109,7 +109,7 @@ func (rsc *registrationServerClient) RegisterConnection(
 
 	observer.metrics.regSuccess.WithLabelValues("registerConnection").Inc()
 
-	observer.logServerInfo(connIndex, conn.Location, fmt.Sprintf("Connection %d registered with %s using ID %s", connIndex, conn.Location, conn.UUID))
+	observer.logServerInfo(connIndex, conn.Location, fmt.Sprintf("Connection %s registered", conn.UUID))
 	observer.sendConnectedEvent(connIndex, conn.Location)
 
 	return nil

@@ -260,15 +260,15 @@ func (h *h2muxConnection) logServerInfo(ctx context.Context, rpcClient *tunnelServerClient) error {
 	})
 	serverInfoMessage, err := serverInfoPromise.Result().Struct()
 	if err != nil {
-		h.observer.log.Error().Msgf("Failed to retrieve server information: %s", err)
+		h.observer.log.Err(err).Msg("Failed to retrieve server information")
 		return err
 	}
 	serverInfo, err := tunnelpogs.UnmarshalServerInfo(serverInfoMessage)
 	if err != nil {
-		h.observer.log.Error().Msgf("Failed to retrieve server information: %s", err)
+		h.observer.log.Err(err).Msg("Failed to retrieve server information")
 		return err
 	}
-	h.observer.logServerInfo(h.connIndex, serverInfo.LocationName, fmt.Sprintf("Connection %d connected to %s", h.connIndex, serverInfo.LocationName))
+	h.observer.logServerInfo(h.connIndex, serverInfo.LocationName, "Connection established")
 	return nil
 }
 

@@ -15,7 +15,7 @@ type tunnelsForHA struct {
 }
 
 // NewTunnelsForHA initializes the Prometheus metrics etc for a tunnelsForHA.
-func NewTunnelsForHA() tunnelsForHA {
+func newTunnelsForHA() tunnelsForHA {
 	metrics := prometheus.NewGaugeVec(
 		prometheus.GaugeOpts{
 			Name: "tunnel_ids",

@@ -18,6 +18,10 @@ import (
 	"github.com/rs/zerolog"
 )
 
+const (
+	LogFieldCFRay = "cfRay"
+)
+
 // Proxy is an HTTP server that proxies requests to a Client.
 type Proxy struct {
 	client Client

@@ -93,8 +97,7 @@ func (proxy *Proxy) IsAllowed(r *http.Request, verbose ...bool) bool {
 		// Warn administrators that invalid JWTs are being rejected. This is indicative
 		// of either a misconfiguration of the CLI or a massive failure of upstream systems.
 		if len(verbose) > 0 {
-			cfRay := proxy.getRayHeader(r)
-			proxy.log.Info().Msgf("dbproxy: Failed JWT authentication: cf-ray: %s %s", cfRay, err)
+			proxy.log.Err(err).Str(LogFieldCFRay, proxy.getRayHeader(r)).Msg("dbproxy: Failed JWT authentication")
 		}
 
 		return false

@@ -239,7 +242,7 @@ func (proxy *Proxy) httpRespondErr(w http.ResponseWriter, r *http.Request, defaultStatus int, err error) {
 	proxy.httpRespond(w, r, status, err.Error())
 	if len(err.Error()) > 0 {
 		cfRay := proxy.getRayHeader(r)
-		proxy.log.Info().Msgf("dbproxy: Database proxy error: cf-ray: %s %s", cfRay, err)
+		proxy.log.Err(err).Str(LogFieldCFRay, cfRay).Msg("dbproxy: Database proxy error")
 	}
 }
 

@@ -21,12 +21,7 @@ const (
 	// discover HA origintunneld servers (GitHub issue #75).
 	dotServerName = "cloudflare-dns.com"
 	dotServerAddr = "1.1.1.1:853"
-	dotTimeout    = time.Duration(15 * time.Second)
-
-	// SRV record resolution TTL
-	resolveEdgeAddrTTL = 1 * time.Hour
-
-	subsystemEdgeAddrResolver = "edgeAddrResolver"
+	dotTimeout    = 15 * time.Second
 )
 
 // Redeclare network functions so they can be overridden in tests.

@@ -64,7 +59,7 @@ func edgeDiscovery(log *zerolog.Logger) ([][]*net.TCPAddr, error) {
 		_, fallbackAddrs, fallbackErr := fallbackLookupSRV(srvService, srvProto, srvName)
 		if fallbackErr != nil || len(fallbackAddrs) == 0 {
 			// use the original DNS error `err` in messages, not `fallbackErr`
-			log.Error().Msgf("Error looking up Cloudflare edge IPs: the DNS query failed: %s", err)
+			log.Err(err).Msg("Error looking up Cloudflare edge IPs: the DNS query failed")
 			for _, s := range friendlyDNSErrorLines {
 				log.Error().Msg(s)
 			}

|
|||
for _, addr := range addrs {
|
||||
tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
|
||||
if err != nil {
|
||||
log.Error().Msgf("Failed to resolve %s, err: %v", addr, err)
|
||||
log.Err(err).Msgf("Failed to resolve %s", addr)
|
||||
} else {
|
||||
resolved = append(resolved, tcpAddr)
|
||||
}
|
||||
|
|
|
@@ -10,10 +10,11 @@ import (
 )
 
 const (
 	subsystem         = "edgediscovery"
+	LogFieldAddress   = "address"
+	LogFieldConnIndex = "connIndex"
 )
 
-var errNoAddressesLeft = fmt.Errorf("There are no free edge addresses left")
+var errNoAddressesLeft = fmt.Errorf("there are no free edge addresses left")
 
 // Edge finds addresses on the Cloudflare edge and hands them out to connections.
 type Edge struct {

@@ -76,42 +77,46 @@ func (ed *Edge) GetAddrForRPC() (*net.TCPAddr, error) {
 }
 
 // GetAddr gives this proxy connection an edge Addr. Prefer Addrs this connection has already used.
-func (ed *Edge) GetAddr(connID int) (*net.TCPAddr, error) {
+func (ed *Edge) GetAddr(connIndex int) (*net.TCPAddr, error) {
+	log := ed.log.With().Int(LogFieldConnIndex, connIndex).Logger()
+
 	ed.Lock()
 	defer ed.Unlock()
 
 	// If this connection has already used an edge addr, return it.
-	if addr := ed.regions.AddrUsedBy(connID); addr != nil {
-		ed.log.Debug().Msgf("edgediscovery - GetAddr: Returning same address back to proxy connection: connID: %d", connID)
+	if addr := ed.regions.AddrUsedBy(connIndex); addr != nil {
+		log.Debug().Msg("edgediscovery - GetAddr: Returning same address back to proxy connection")
 		return addr, nil
 	}
 
 	// Otherwise, give it an unused one
-	addr := ed.regions.GetUnusedAddr(nil, connID)
+	addr := ed.regions.GetUnusedAddr(nil, connIndex)
 	if addr == nil {
-		ed.log.Debug().Msgf("edgediscovery - GetAddr: No addresses left to give proxy connection: connID: %d", connID)
+		log.Debug().Msg("edgediscovery - GetAddr: No addresses left to give proxy connection")
 		return nil, errNoAddressesLeft
 	}
-	ed.log.Debug().Msgf("edgediscovery - GetAddr: Giving connection its new address %s: connID: %d", addr, connID)
+	log.Debug().Str(LogFieldAddress, addr.String()).Msg("edgediscovery - GetAddr: Giving connection its new address")
 	return addr, nil
 }
 
 // GetDifferentAddr gives back the proxy connection's edge Addr and uses a new one.
-func (ed *Edge) GetDifferentAddr(connID int) (*net.TCPAddr, error) {
+func (ed *Edge) GetDifferentAddr(connIndex int) (*net.TCPAddr, error) {
+	log := ed.log.With().Int(LogFieldConnIndex, connIndex).Logger()
+
 	ed.Lock()
 	defer ed.Unlock()
 
-	oldAddr := ed.regions.AddrUsedBy(connID)
+	oldAddr := ed.regions.AddrUsedBy(connIndex)
 	if oldAddr != nil {
 		ed.regions.GiveBack(oldAddr)
 	}
-	addr := ed.regions.GetUnusedAddr(oldAddr, connID)
+	addr := ed.regions.GetUnusedAddr(oldAddr, connIndex)
 	if addr == nil {
-		ed.log.Debug().Msgf("edgediscovery - GetDifferentAddr: No addresses left to give proxy connection: connID: %d", connID)
+		log.Debug().Msg("edgediscovery - GetDifferentAddr: No addresses left to give proxy connection")
 		// note: if oldAddr were not nil, it will become available on the next iteration
 		return nil, errNoAddressesLeft
 	}
-	ed.log.Debug().Msgf("edgediscovery - GetDifferentAddr: Giving connection its new address %s: connID: %d", addr, connID)
+	log.Debug().Str(LogFieldAddress, addr.String()).Msg("edgediscovery - GetDifferentAddr: Giving connection its new address")
 	return addr, nil
 }
 

@@ -168,19 +168,19 @@ func websocketHandler(log *zerolog.Logger, upgrader websocket.Upgrader) http.HandlerFunc {
 
 	conn, err := upgrader.Upgrade(w, r, nil)
 	if err != nil {
-		log.Error().Msgf("failed to upgrade to websocket connection, error: %s", err)
+		log.Err(err).Msg("failed to upgrade to websocket connection")
 		return
 	}
 	defer conn.Close()
 	for {
 		mt, message, err := conn.ReadMessage()
 		if err != nil {
-			log.Error().Msgf("websocket read message error: %s", err)
+			log.Err(err).Msg("websocket read message error")
 			break
 		}
 
 		if err := conn.WriteMessage(mt, message); err != nil {
-			log.Error().Msgf("websocket write message error: %s", err)
+			log.Err(err).Msg("websocket write message error")
 			break
 		}
 	}

@@ -72,7 +72,7 @@ func ServeMetrics(
 		log.Info().Msg("Metrics server stopped")
 		return nil
 	}
-	log.Error().Msgf("Metrics server quit with error: %s", err)
+	log.Err(err).Msg("Metrics server failed")
 	return err
 }
 

@@ -110,7 +110,9 @@ func (s *Supervisor) Run(ctx context.Context, connectedSignal *signal.Signal, reconnectCh chan ReconnectSignal) error {
 		if timer, err := s.reconnectCredentialManager.RefreshAuth(ctx, refreshAuthBackoff, s.authenticate); err == nil {
 			refreshAuthBackoffTimer = timer
 		} else {
-			s.log.Error().Msgf("supervisor: initial refreshAuth failed, retrying in %v: %s", refreshAuthRetryDuration, err)
+			s.log.Err(err).
+				Dur("refreshAuthRetryDuration", refreshAuthRetryDuration).
+				Msgf("supervisor: initial refreshAuth failed, retrying in %v", refreshAuthRetryDuration)
 			refreshAuthBackoffTimer = time.After(refreshAuthRetryDuration)
 		}
 	}

@@ -129,7 +131,7 @@ func (s *Supervisor) Run(ctx context.Context, connectedSignal *signal.Signal, reconnectCh chan ReconnectSignal) error {
 		case tunnelError := <-s.tunnelErrors:
 			tunnelsActive--
 			if tunnelError.err != nil {
-				s.log.Info().Msgf("supervisor: Tunnel disconnected due to error: %s", tunnelError.err)
+				s.log.Err(tunnelError.err).Msg("supervisor: Tunnel disconnected")
 				tunnelsWaiting = append(tunnelsWaiting, tunnelError.index)
 				s.waitForNextTunnel(tunnelError.index)
 

@@ -152,7 +154,7 @@ func (s *Supervisor) Run(ctx context.Context, connectedSignal *signal.Signal, re
    case <-refreshAuthBackoffTimer:
        newTimer, err := s.reconnectCredentialManager.RefreshAuth(ctx, refreshAuthBackoff, s.authenticate)
        if err != nil {
            s.log.Error().Msgf("supervisor: Authentication failed: %s", err)
            s.log.Err(err).Msg("supervisor: Authentication failed")
            // Permanent failure. Leave the `select` without setting the
            // channel to be non-null, so we'll never hit this case of the `select` again.
            continue

@@ -211,7 +213,16 @@ func (s *Supervisor) startFirstTunnel(ctx context.Context, connectedSignal *sign
        return
    }

    err = ServeTunnelLoop(ctx, s.reconnectCredentialManager, s.config, addr, firstConnIndex, connectedSignal, s.cloudflaredUUID, reconnectCh)
    err = ServeTunnelLoop(
        ctx,
        s.reconnectCredentialManager,
        s.config,
        addr,
        firstConnIndex,
        connectedSignal,
        s.cloudflaredUUID,
        reconnectCh,
    )
    // If the first tunnel disconnects, keep restarting it.
    edgeErrors := 0
    for s.unusedIPs() {

@@ -234,7 +245,16 @@ func (s *Supervisor) startFirstTunnel(ctx context.Context, connectedSignal *sign
                return
            }
        }
        err = ServeTunnelLoop(ctx, s.reconnectCredentialManager, s.config, addr, firstConnIndex, connectedSignal, s.cloudflaredUUID, reconnectCh)
        err = ServeTunnelLoop(
            ctx,
            s.reconnectCredentialManager,
            s.config,
            addr,
            firstConnIndex,
            connectedSignal,
            s.cloudflaredUUID,
            reconnectCh,
        )
    }
}

@@ -253,7 +273,16 @@ func (s *Supervisor) startTunnel(ctx context.Context, index int, connectedSignal
    if err != nil {
        return
    }
    err = ServeTunnelLoop(ctx, s.reconnectCredentialManager, s.config, addr, uint8(index), connectedSignal, s.cloudflaredUUID, reconnectCh)
    err = ServeTunnelLoop(
        ctx,
        s.reconnectCredentialManager,
        s.config,
        addr,
        uint8(index),
        connectedSignal,
        s.cloudflaredUUID,
        reconnectCh,
    )
}

func (s *Supervisor) newConnectedTunnelSignal(index int) *signal.Signal {

@@ -151,7 +151,8 @@ func StartTunnelDaemon(ctx context.Context, config *TunnelConfig, connectedSigna
    return s.Run(ctx, connectedSignal, reconnectCh)
}

func ServeTunnelLoop(ctx context.Context,
func ServeTunnelLoop(
    ctx context.Context,
    credentialManager *reconnectCredentialManager,
    config *TunnelConfig,
    addr *net.TCPAddr,

@@ -163,6 +164,8 @@ func ServeTunnelLoop(ctx context.Context,
    haConnections.Inc()
    defer haConnections.Dec()

    connLog := config.Log.With().Uint8(connection.LogFieldConnIndex, connIndex).Logger()

    protocallFallback := &protocallFallback{
        BackoffHandler{MaxRetries: config.Retries},
        config.ProtocolSelector.Current(),

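This is the structural heart of the change: ServeTunnelLoop derives a child logger that stamps the connection index onto every event, and the hunks that follow thread that logger down through ServeTunnel, ServeH2mux, ServeHTTP2, and waitForBackoff. A sketch of the mechanism; "connIndex" is an assumed literal for the connection.LogFieldConnIndex constant:

package main

import (
    "os"

    "github.com/rs/zerolog"
)

func main() {
    base := zerolog.New(os.Stdout)

    // With() opens a context; Logger() freezes it into a child logger.
    // Every event emitted through connLog now carries connIndex automatically.
    connLog := base.With().Uint8("connIndex", 2).Logger()

    connLog.Info().Msg("tunnel connected")
    // {"level":"info","connIndex":2,"message":"tunnel connected"}
}
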
@@ -181,6 +184,7 @@ func ServeTunnelLoop(ctx context.Context,
    for {
        err, recoverable := ServeTunnel(
            ctx,
            &connLog,
            credentialManager,
            config,
            addr,

@@ -195,7 +199,7 @@ func ServeTunnelLoop(ctx context.Context,
            return err
        }

        err = waitForBackoff(ctx, protocallFallback, config, connIndex, err)
        err = waitForBackoff(ctx, &connLog, protocallFallback, config, connIndex, err)
        if err != nil {
            return err
        }

@@ -224,6 +228,7 @@ func (pf *protocallFallback) fallback(fallback connection.Protocol) {
// Expect err to always be non nil
func waitForBackoff(
    ctx context.Context,
    log *zerolog.Logger,
    protobackoff *protocallFallback,
    config *TunnelConfig,
    connIndex uint8,

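Taking *zerolog.Logger as an explicit parameter, rather than reaching for config.Log, is what lets the per-connection context travel with the call, and it makes the function easy to exercise in isolation. A sketch of the shape this enables; the helper name is illustrative:

package main

import (
    "os"

    "github.com/rs/zerolog"
)

// backOff is illustrative: any helper taking *zerolog.Logger can receive a
// context-rich child logger in production and a no-op logger in tests.
func backOff(log *zerolog.Logger, attempt int) {
    log.Info().Int("attempt", attempt).Msg("backing off")
}

func main() {
    prod := zerolog.New(os.Stdout).With().Uint8("connIndex", 0).Logger()
    backOff(&prod, 1) // {"level":"info","connIndex":0,"attempt":1,"message":"backing off"}

    nop := zerolog.Nop()
    backOff(&nop, 1) // discarded entirely
}
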
@@ -235,7 +240,9 @@ func waitForBackoff(
    }

    config.Observer.SendReconnect(connIndex)
    config.Log.Info().Msgf("Retrying connection %d in %s seconds, error %v", connIndex, duration, err)
    log.Info().
        Err(err).
        Msgf("Retrying connection in %s seconds", duration)
    protobackoff.Backoff(ctx)

    if protobackoff.ReachedMaxRetries() {

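One nit survives the rewrite: %s on a time.Duration already renders a unit, so the retained format string prints, for example, "Retrying connection in 8s seconds". A field-based variant, sketched here as a possible cleanup rather than anything this commit does, avoids the duplication:

package main

import (
    "errors"
    "os"
    "time"

    "github.com/rs/zerolog"
)

func main() {
    log := zerolog.New(os.Stdout)
    duration := 8 * time.Second
    err := errors.New("dial timeout") // stand-in error

    // The interval becomes a field (milliseconds by default), the message stays clean.
    log.Info().Err(err).Dur("backoff", duration).Msg("Retrying connection")
    // {"level":"info","error":"dial timeout","backoff":8000,"message":"Retrying connection"}
}
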
@@ -247,7 +254,7 @@ func waitForBackoff(
        if protobackoff.protocol == fallback {
            return err
        }
        config.Log.Info().Msgf("Fallback to use %s", fallback)
        log.Info().Msgf("Fallback to use %s", fallback)
        protobackoff.fallback(fallback)
    } else if !protobackoff.inFallback {
        current := config.ProtocolSelector.Current()

@@ -261,6 +268,7 @@ func waitForBackoff(

func ServeTunnel(
    ctx context.Context,
    log *zerolog.Logger,
    credentialManager *reconnectCredentialManager,
    config *TunnelConfig,
    addr *net.TCPAddr,

|
|||
}
|
||||
if protocol == connection.HTTP2 {
|
||||
connOptions := config.ConnectionOptions(edgeConn.LocalAddr().String(), uint8(backoff.retries))
|
||||
return ServeHTTP2(ctx, config, edgeConn, connOptions, connIndex, connectedFuse, reconnectCh)
|
||||
return ServeHTTP2(ctx, log, config, edgeConn, connOptions, connIndex, connectedFuse, reconnectCh)
|
||||
}
|
||||
return ServeH2mux(ctx, credentialManager, config, edgeConn, connIndex, connectedFuse, cloudflaredUUID, reconnectCh)
|
||||
return ServeH2mux(
|
||||
ctx,
|
||||
log,
|
||||
credentialManager,
|
||||
config,
|
||||
edgeConn,
|
||||
connIndex,
|
||||
connectedFuse,
|
||||
cloudflaredUUID,
|
||||
reconnectCh,
|
||||
)
|
||||
}
|
||||
|
||||
func ServeH2mux(
|
||||
ctx context.Context,
|
||||
log *zerolog.Logger,
|
||||
credentialManager *reconnectCredentialManager,
|
||||
config *TunnelConfig,
|
||||
edgeConn net.Conn,
|
||||
connectionIndex uint8,
|
||||
connIndex uint8,
|
||||
connectedFuse *connectedFuse,
|
||||
cloudflaredUUID uuid.UUID,
|
||||
reconnectCh chan ReconnectSignal,
|
||||
|
@@ -314,11 +333,10 @@ func ServeH2mux(
    config.Log.Debug().Msgf("Connecting via h2mux")
    // Returns error from parsing the origin URL or handshake errors
    handler, err, recoverable := connection.NewH2muxConnection(
        ctx,
        config.ConnectionConfig,
        config.MuxerConfig,
        edgeConn,
        connectionIndex,
        connIndex,
        config.Observer,
    )
    if err != nil {

@@ -332,7 +350,7 @@ func ServeH2mux(
        connOptions := config.ConnectionOptions(edgeConn.LocalAddr().String(), uint8(connectedFuse.backoff.retries))
        return handler.ServeNamedTunnel(serveCtx, config.NamedTunnel, credentialManager, connOptions, connectedFuse)
    }
    registrationOptions := config.RegistrationOptions(connectionIndex, edgeConn.LocalAddr().String(), cloudflaredUUID)
    registrationOptions := config.RegistrationOptions(connIndex, edgeConn.LocalAddr().String(), cloudflaredUUID)
    return handler.ServeClassicTunnel(serveCtx, config.ClassicTunnel, credentialManager, registrationOptions, connectedFuse)
})

@@ -345,29 +363,31 @@ func ServeH2mux(
        // don't retry this connection anymore, let supervisor pick new a address
        return err, false
    case *serverRegisterTunnelError:
        config.Log.Error().Msgf("Register tunnel error from server side: %s", err.cause)
        log.Err(err).Msg("Register tunnel error from server side")
        // Don't send registration error return from server to Sentry. They are
        // logged on server side
        if incidents := config.IncidentLookup.ActiveIncidents(); len(incidents) > 0 {
            config.Log.Error().Msg(activeIncidentsMsg(incidents))
            log.Error().Msg(activeIncidentsMsg(incidents))
        }
        return err.cause, !err.permanent
    case *clientRegisterTunnelError:
        config.Log.Error().Msgf("Register tunnel error on client side: %s", err.cause)
        log.Err(err).Msg("Register tunnel error on client side")
        return err, true
    case *muxerShutdownError:
        config.Log.Info().Msg("Muxer shutdown")
        log.Info().Msg("Muxer shutdown")
        return err, true
    case *ReconnectSignal:
        config.Log.Info().Msgf("Restarting connection %d due to reconnect signal in %s", connectionIndex, err.Delay)
        log.Info().
            Uint8(connection.LogFieldConnIndex, connIndex).
            Msgf("Restarting connection due to reconnect signal in %s", err.Delay)
        err.DelayBeforeReconnect()
        return err, true
    default:
        if err == context.Canceled {
            config.Log.Debug().Msgf("Serve tunnel error: %s", err)
            log.Debug().Err(err).Msgf("Serve tunnel error")
            return err, false
        }
        config.Log.Error().Msgf("Serve tunnel error: %s", err)
        log.Err(err).Msgf("Serve tunnel error")
        return err, true
    }
}

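A subtle behavioral shift hides in this switch: the old Msgf calls logged err.cause for the register-tunnel cases, while log.Err(err) serializes the wrapper's full Error() text. Whether the logged string changes depends on how the wrapper formats itself. A compressed sketch of the shape; registerError is invented to stand in for the unexported wrappers here:

package main

import (
    "errors"
    "os"

    "github.com/rs/zerolog"
)

// registerError stands in for the unexported error wrappers in this switch.
type registerError struct {
    cause     error
    permanent bool
}

func (e *registerError) Error() string { return "register tunnel error: " + e.cause.Error() }

func classify(log *zerolog.Logger, err error) (error, bool) {
    switch err := err.(type) {
    case *registerError:
        // Logs the wrapper, not err.cause, so the "error" field carries the prefix too.
        log.Err(err).Msg("Register tunnel error from server side")
        return err.cause, !err.permanent
    default:
        log.Err(err).Msg("Serve tunnel error")
        return err, true
    }
}

func main() {
    log := zerolog.New(os.Stdout)
    classify(&log, &registerError{cause: errors.New("quota exceeded"), permanent: true})
    // {"level":"error","error":"register tunnel error: quota exceeded","message":"Register tunnel error from server side"}
}
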
@@ -376,6 +396,7 @@ func ServeH2mux(

func ServeHTTP2(
    ctx context.Context,
    log *zerolog.Logger,
    config *TunnelConfig,
    tlsServerConn net.Conn,
    connOptions *tunnelpogs.ConnectionOptions,

@@ -383,7 +404,7 @@ func ServeHTTP2(
    connectedFuse connection.ConnectedFuse,
    reconnectCh chan ReconnectSignal,
) (err error, recoverable bool) {
    config.Log.Debug().Msgf("Connecting via http2")
    log.Debug().Msgf("Connecting via http2")
    server := connection.NewHTTP2Connection(
        tlsServerConn,
        config.ConnectionConfig,

@@ -397,7 +418,7 @@ func ServeHTTP2(
    errGroup, serveCtx := errgroup.WithContext(ctx)
    errGroup.Go(func() error {
        server.Serve(serveCtx)
        return fmt.Errorf("Connection with edge closed")
        return fmt.Errorf("connection with edge closed")
    })

    errGroup.Go(listenReconnect(serveCtx, reconnectCh))

@@ -68,14 +68,14 @@ func TestWaitForBackoffFallback(t *testing.T) {

    // Retry #0 and #1. At retry #2, we switch protocol, so the fallback loop has one more retry than this
    for i := 0; i < int(maxRetries-1); i++ {
        err := waitForBackoff(ctx, protocallFallback, config, connIndex, fmt.Errorf("Some error"))
        err := waitForBackoff(ctx, &log, protocallFallback, config, connIndex, fmt.Errorf("some error"))
        assert.NoError(t, err)
        assert.Equal(t, initProtocol, protocallFallback.protocol)
    }

    // Retry fallback protocol
    for i := 0; i < int(maxRetries); i++ {
        err := waitForBackoff(ctx, protocallFallback, config, connIndex, fmt.Errorf("Some error"))
        err := waitForBackoff(ctx, &log, protocallFallback, config, connIndex, fmt.Errorf("some error"))
        assert.NoError(t, err)
        fallback, ok := protocolSelector.Fallback()
        assert.True(t, ok)

@@ -86,11 +86,11 @@ func TestWaitForBackoffFallback(t *testing.T) {
    assert.Equal(t, initProtocol, currentGlobalProtocol)

    // No protocol to fallback, return error
    err = waitForBackoff(ctx, protocallFallback, config, connIndex, fmt.Errorf("Some error"))
    err = waitForBackoff(ctx, &log, protocallFallback, config, connIndex, fmt.Errorf("some error"))
    assert.Error(t, err)

    protocallFallback.reset()
    err = waitForBackoff(ctx, protocallFallback, config, connIndex, fmt.Errorf("New error"))
    err = waitForBackoff(ctx, &log, protocallFallback, config, connIndex, fmt.Errorf("new error"))
    assert.NoError(t, err)
    assert.Equal(t, initProtocol, protocallFallback.protocol)
}

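Alongside threading the logger, the test's error strings are lowercased, following the Go convention that error strings start lowercase because they usually appear mid-sentence after a colon. A two-line illustration:

package main

import (
    "errors"
    "fmt"
)

func main() {
    err := errors.New("some error") // lowercase: it will be embedded mid-sentence
    fmt.Println(fmt.Errorf("waitForBackoff failed: %w", err))
    // waitForBackoff failed: some error
}
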
@@ -170,7 +170,7 @@ func loadGlobalCertPool(log *zerolog.Logger) (*x509.CertPool, error) {
    certPool, err := x509.SystemCertPool()
    if err != nil {
        if runtime.GOOS != "windows" { // See https://github.com/golang/go/issues/16736
            log.Info().Msgf("error obtaining the system certificates: %s", err)
            log.Err(err).Msg("error obtaining the system certificates")
        }
        certPool = x509.NewCertPool()
    }

@@ -49,12 +49,12 @@ func (u *UpstreamHTTPS) Exchange(ctx context.Context, query *dns.Msg) (*dns.Msg,
    for _, bootstrap := range u.bootstraps {
        endpoint, client, err := configureBootstrap(bootstrap)
        if err != nil {
            u.log.Error().Msgf("failed to configure boostrap upstream %s: %s", bootstrap, err)
            u.log.Err(err).Msgf("failed to configure boostrap upstream %s", bootstrap)
            continue
        }
        msg, err := exchange(queryBuf, query.Id, endpoint, client, u.log)
        if err != nil {
            u.log.Error().Msgf("failed to connect to a boostrap upstream %s: %s", bootstrap, err)
            u.log.Err(err).Msgf("failed to connect to a boostrap upstream %s", bootstrap)
            continue
        }
        return msg, nil

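These sites land halfway: err becomes a structured field, but the bootstrap address stays interpolated via Msgf, and the "boostrap" typo in the message survives the rewrite. A possible follow-up, sketched here and not part of this commit, would finish the conversion with a field; the endpoint and error are illustrative:

package main

import (
    "errors"
    "os"

    "github.com/rs/zerolog"
)

func main() {
    log := zerolog.New(os.Stdout)
    bootstrap := "https://162.159.36.1/dns-query" // illustrative endpoint
    err := errors.New("tls handshake timeout")    // stand-in error

    // Fully structured variant: both the error and the upstream are fields.
    log.Err(err).Str("bootstrap", bootstrap).Msg("failed to connect to a bootstrap upstream")
}
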
@@ -78,7 +78,7 @@ func exchange(msg []byte, queryID uint16, endpoint *url.URL, client *http.Client
        return response, nil
    }

    log.Error().Msgf("failed to connect to an HTTPS backend %q: %s", endpoint, backendErr)
    log.Err(backendErr).Msgf("failed to connect to an HTTPS backend %q", endpoint)
    return nil, backendErr
}

@@ -20,6 +20,11 @@ import (
    "github.com/urfave/cli/v2"
)

const (
    LogFieldAddress = "address"
    LogFieldURL     = "url"
)

// Listener is an adapter between CoreDNS server and Warp runnable
type Listener struct {
    server *dnsserver.Server

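As with LogFieldOriginURL and LogFieldHost earlier in the diff, each package declares its field keys as constants, so every call site that logs an address or URL agrees on the key and downstream consumers can filter on it instead of parsing message text. A sketch of the payoff; both values are illustrative:

package main

import (
    "os"

    "github.com/rs/zerolog"
)

const (
    LogFieldAddress = "address"
    LogFieldURL     = "url"
)

func main() {
    log := zerolog.New(os.Stdout)

    // Both events use agreed keys, so `jq 'select(.url)'` or a log indexer
    // finds them without regexes over the message string.
    log.Info().Str(LogFieldURL, "https://1.1.1.1/dns-query").Msg("Adding DNS upstream")
    log.Info().Str(LogFieldAddress, "127.0.0.1:53").Msg("Starting DNS over HTTPS proxy server")
}
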
@@ -75,14 +80,20 @@ func Run(c *cli.Context) error {

    metricsListener, err := net.Listen("tcp", c.String("metrics"))
    if err != nil {
        log.Fatal().Msgf("Failed to open the metrics listener: %s", err)
        log.Fatal().Err(err).Msg("Failed to open the metrics listener")
    }

    go metrics.ServeMetrics(metricsListener, nil, nil, log)

    listener, err := CreateListener(c.String("address"), uint16(c.Uint("port")), c.StringSlice("upstream"), c.StringSlice("bootstrap"), log)
    listener, err := CreateListener(
        c.String("address"),
        uint16(c.Uint("port")),
        c.StringSlice("upstream"),
        c.StringSlice("bootstrap"),
        log,
    )
    if err != nil {
        log.Error().Msgf("Failed to create the listeners: %s", err)
        log.Err(err).Msg("Failed to create the listeners")
        return err
    }

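The Fatal() site keeps its level and only restructures the error: zerolog's Fatal() writes the event and then calls os.Exit(1), which is why no return follows it. A compressed, runnable sketch:

package main

import (
    "net"
    "os"

    "github.com/rs/zerolog"
)

func main() {
    log := zerolog.New(os.Stderr)

    ln, err := net.Listen("tcp", "127.0.0.1:0")
    if err != nil {
        // Writes {"level":"fatal","error":...} and exits the process with status 1,
        // so no explicit return is needed after this line.
        log.Fatal().Err(err).Msg("Failed to open the metrics listener")
    }
    defer ln.Close()
    log.Info().Str("address", ln.Addr().String()).Msg("metrics listener open")
}
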
@@ -90,7 +101,7 @@ func Run(c *cli.Context) error {
    readySignal := make(chan struct{})
    err = listener.Start(readySignal)
    if err != nil {
        log.Error().Msgf("Failed to start the listeners: %s", err)
        log.Err(err).Msg("Failed to start the listeners")
        return listener.Stop()
    }
    <-readySignal

@@ -104,7 +115,7 @@ func Run(c *cli.Context) error {
    // Shut down server
    err = listener.Stop()
    if err != nil {
        log.Error().Msgf("failed to stop: %s", err)
        log.Err(err).Msg("failed to stop")
    }
    return err
}

@@ -125,7 +136,7 @@ func createConfig(address string, port uint16, p plugin.Handler) *dnsserver.Conf
// Start blocks for serving requests
func (l *Listener) Start(readySignal chan struct{}) error {
    defer close(readySignal)
    l.log.Info().Msgf("Starting DNS over HTTPS proxy server on: %s", l.server.Address())
    l.log.Info().Str(LogFieldAddress, l.server.Address()).Msg("Starting DNS over HTTPS proxy server")

    // Start UDP listener
    if udp, err := l.server.ListenPacket(); err == nil {

@@ -166,7 +177,7 @@ func CreateListener(address string, port uint16, upstreams []string, bootstraps
    // Build the list of upstreams
    upstreamList := make([]Upstream, 0)
    for _, url := range upstreams {
        log.Info().Msgf("Adding DNS upstream - url: %s", url)
        log.Info().Str(LogFieldURL, url).Msg("Adding DNS upstream")
        upstream, err := NewUpstreamHTTPS(url, bootstraps, log)
        if err != nil {
            return nil, errors.Wrap(err, "failed to create HTTPS upstream")

@@ -173,7 +173,7 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {

    stream, err := net.Dial("tcp", finalDestination)
    if err != nil {
        h.log.Error().Msgf("Cannot connect to remote: %s", err)
        h.log.Err(err).Msg("Cannot connect to remote")
        return
    }
    defer stream.Close()

@@ -184,7 +184,7 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
    }
    conn, err := h.upgrader.Upgrade(w, r, nil)
    if err != nil {
        h.log.Error().Msgf("failed to upgrade: %s", err)
        h.log.Err(err).Msg("failed to upgrade")
        return
    }
    _ = conn.SetReadDeadline(time.Now().Add(pongWait))