diff --git a/carrier/carrier.go b/carrier/carrier.go index b289729d..617871a1 100644 --- a/carrier/carrier.go +++ b/carrier/carrier.go @@ -12,8 +12,9 @@ import ( "github.com/cloudflare/cloudflared/cmd/cloudflared/token" "github.com/cloudflare/cloudflared/h2mux" - "github.com/cloudflare/cloudflared/logger" + "github.com/pkg/errors" + "github.com/rs/zerolog" ) type StartOptions struct { @@ -49,7 +50,7 @@ func (c *StdinoutStream) Write(p []byte) (int, error) { // Helper to allow defering the response close with a check that the resp is not nil func closeRespBody(resp *http.Response) { if resp != nil { - resp.Body.Close() + _ = resp.Body.Close() } } @@ -103,7 +104,7 @@ func Serve(remoteConn Connection, listener net.Listener, shutdownC <-chan struct // serveConnection handles connections for the Serve() call func serveConnection(remoteConn Connection, c net.Conn, options *StartOptions) { defer c.Close() - remoteConn.ServeStream(options, c) + _ = remoteConn.ServeStream(options, c) } // IsAccessResponse checks the http Response to see if the url location @@ -125,13 +126,13 @@ func IsAccessResponse(resp *http.Response) bool { } // BuildAccessRequest builds an HTTP request with the Access token set -func BuildAccessRequest(options *StartOptions, logger logger.Service) (*http.Request, error) { +func BuildAccessRequest(options *StartOptions, log *zerolog.Logger) (*http.Request, error) { req, err := http.NewRequest(http.MethodGet, options.OriginURL, nil) if err != nil { return nil, err } - token, err := token.FetchTokenWithRedirect(req.URL, logger) + token, err := token.FetchTokenWithRedirect(req.URL, log) if err != nil { return nil, err } diff --git a/carrier/carrier_test.go b/carrier/carrier_test.go index e5b4dc6f..0300e3aa 100644 --- a/carrier/carrier_test.go +++ b/carrier/carrier_test.go @@ -9,8 +9,8 @@ import ( "sync" "testing" - "github.com/cloudflare/cloudflared/logger" ws "github.com/gorilla/websocket" + "github.com/rs/zerolog" "github.com/stretchr/testify/assert" ) @@ 
-43,8 +43,8 @@ func (s *testStreamer) Write(p []byte) (int, error) { func TestStartClient(t *testing.T) { message := "Good morning Austin! Time for another sunny day in the great state of Texas." - logger := logger.NewOutputWriter(logger.NewMockWriteManager()) - wsConn := NewWSConnection(logger, false) + log := zerolog.Nop() + wsConn := NewWSConnection(&log, false) ts := newTestWebSocketServer() defer ts.Close() @@ -55,10 +55,10 @@ func TestStartClient(t *testing.T) { } err := StartClient(wsConn, buf, options) assert.NoError(t, err) - buf.Write([]byte(message)) + _, _ = buf.Write([]byte(message)) readBuffer := make([]byte, len(message)) - buf.Read(readBuffer) + _, _ = buf.Read(readBuffer) assert.Equal(t, message, string(readBuffer)) } @@ -68,9 +68,9 @@ func TestStartServer(t *testing.T) { t.Fatalf("Error starting listener: %v", err) } message := "Good morning Austin! Time for another sunny day in the great state of Texas." - logger := logger.NewOutputWriter(logger.NewMockWriteManager()) + log := zerolog.Nop() shutdownC := make(chan struct{}) - wsConn := NewWSConnection(logger, false) + wsConn := NewWSConnection(&log, false) ts := newTestWebSocketServer() defer ts.Close() options := &StartOptions{ @@ -86,10 +86,10 @@ func TestStartServer(t *testing.T) { }() conn, err := net.Dial("tcp", listener.Addr().String()) - conn.Write([]byte(message)) + _, _ = conn.Write([]byte(message)) readBuffer := make([]byte, len(message)) - conn.Read(readBuffer) + _, _ = conn.Read(readBuffer) assert.Equal(t, string(readBuffer), message) } diff --git a/carrier/websocket.go b/carrier/websocket.go index 194b5e3d..9f16021c 100644 --- a/carrier/websocket.go +++ b/carrier/websocket.go @@ -8,16 +8,17 @@ import ( "net/http/httputil" "github.com/cloudflare/cloudflared/cmd/cloudflared/token" - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/socks" cfwebsocket "github.com/cloudflare/cloudflared/websocket" + "github.com/gorilla/websocket" + "github.com/rs/zerolog" ) // 
Websocket is used to carry data via WS binary frames over the tunnel from client to the origin // This implements the functions for glider proxy (sock5) and the carrier interface type Websocket struct { - logger logger.Service + log *zerolog.Logger isSocks bool } @@ -36,9 +37,9 @@ func (d *wsdialer) Dial(address string) (io.ReadWriteCloser, *socks.AddrSpec, er } // NewWSConnection returns a new connection object -func NewWSConnection(logger logger.Service, isSocks bool) Connection { +func NewWSConnection(log *zerolog.Logger, isSocks bool) Connection { return &Websocket{ - logger: logger, + log: log, isSocks: isSocks, } } @@ -46,9 +47,9 @@ func NewWSConnection(logger logger.Service, isSocks bool) Connection { // ServeStream will create a Websocket client stream connection to the edge // it blocks and writes the raw data from conn over the tunnel func (ws *Websocket) ServeStream(options *StartOptions, conn io.ReadWriter) error { - wsConn, err := createWebsocketStream(options, ws.logger) + wsConn, err := createWebsocketStream(options, ws.log) if err != nil { - ws.logger.Errorf("failed to connect to %s with error: %s", options.OriginURL, err) + ws.log.Error().Msgf("failed to connect to %s with error: %s", options.OriginURL, err) return err } defer wsConn.Close() @@ -58,7 +59,7 @@ func (ws *Websocket) ServeStream(options *StartOptions, conn io.ReadWriter) erro requestHandler := socks.NewRequestHandler(dialer) socksServer := socks.NewConnectionHandler(requestHandler) - socksServer.Serve(conn) + _ = socksServer.Serve(conn) } else { cfwebsocket.Stream(wsConn, conn) } @@ -68,13 +69,13 @@ func (ws *Websocket) ServeStream(options *StartOptions, conn io.ReadWriter) erro // StartServer creates a Websocket server to listen for connections. 
// This is used on the origin (tunnel) side to take data from the muxer and send it to the origin func (ws *Websocket) StartServer(listener net.Listener, remote string, shutdownC <-chan struct{}) error { - return cfwebsocket.StartProxyServer(ws.logger, listener, remote, shutdownC, cfwebsocket.DefaultStreamHandler) + return cfwebsocket.StartProxyServer(ws.log, listener, remote, shutdownC, cfwebsocket.DefaultStreamHandler) } // createWebsocketStream will create a WebSocket connection to stream data over // It also handles redirects from Access and will present that flow if // the token is not present on the request -func createWebsocketStream(options *StartOptions, logger logger.Service) (*cfwebsocket.Conn, error) { +func createWebsocketStream(options *StartOptions, log *zerolog.Logger) (*cfwebsocket.Conn, error) { req, err := http.NewRequest(http.MethodGet, options.OriginURL, nil) if err != nil { return nil, err @@ -82,13 +83,13 @@ func createWebsocketStream(options *StartOptions, logger logger.Service) (*cfweb req.Header = options.Headers dump, err := httputil.DumpRequest(req, false) - logger.Debugf("Websocket request: %s", string(dump)) + log.Debug().Msgf("Websocket request: %s", string(dump)) wsConn, resp, err := cfwebsocket.ClientConnect(req, nil) defer closeRespBody(resp) if err != nil && IsAccessResponse(resp) { - wsConn, err = createAccessAuthenticatedStream(options, logger) + wsConn, err = createAccessAuthenticatedStream(options, log) if err != nil { return nil, err } @@ -104,8 +105,8 @@ func createWebsocketStream(options *StartOptions, logger logger.Service) (*cfweb // this probably means the token in storage is invalid (expired/revoked). If that // happens it deletes the token and runs the connection again, so the user can // login again and generate a new one. 
-func createAccessAuthenticatedStream(options *StartOptions, logger logger.Service) (*websocket.Conn, error) { - wsConn, resp, err := createAccessWebSocketStream(options, logger) +func createAccessAuthenticatedStream(options *StartOptions, log *zerolog.Logger) (*websocket.Conn, error) { + wsConn, resp, err := createAccessWebSocketStream(options, log) defer closeRespBody(resp) if err == nil { return wsConn, nil @@ -123,7 +124,7 @@ func createAccessAuthenticatedStream(options *StartOptions, logger logger.Servic if err := token.RemoveTokenIfExists(originReq.URL); err != nil { return nil, err } - wsConn, resp, err = createAccessWebSocketStream(options, logger) + wsConn, resp, err = createAccessWebSocketStream(options, log) defer closeRespBody(resp) if err != nil { return nil, err @@ -133,23 +134,23 @@ func createAccessAuthenticatedStream(options *StartOptions, logger logger.Servic } // createAccessWebSocketStream builds an Access request and makes a connection -func createAccessWebSocketStream(options *StartOptions, logger logger.Service) (*websocket.Conn, *http.Response, error) { - req, err := BuildAccessRequest(options, logger) +func createAccessWebSocketStream(options *StartOptions, log *zerolog.Logger) (*websocket.Conn, *http.Response, error) { + req, err := BuildAccessRequest(options, log) if err != nil { return nil, nil, err } dump, err := httputil.DumpRequest(req, false) - logger.Debugf("Access Websocket request: %s", string(dump)) + log.Debug().Msgf("Access Websocket request: %s", string(dump)) conn, resp, err := cfwebsocket.ClientConnect(req, nil) if resp != nil { r, err := httputil.DumpResponse(resp, true) if r != nil { - logger.Debugf("Websocket response: %q", r) + log.Debug().Msgf("Websocket response: %q", r) } else if err != nil { - logger.Debugf("Websocket response error: %v", err) + log.Debug().Msgf("Websocket response error: %v", err) } } diff --git a/cmd/cloudflared/access/carrier.go b/cmd/cloudflared/access/carrier.go index 87a4584e..1cbacca8 100644 
--- a/cmd/cloudflared/access/carrier.go +++ b/cmd/cloudflared/access/carrier.go @@ -5,17 +5,18 @@ import ( "strings" "github.com/cloudflare/cloudflared/carrier" - "github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil" "github.com/cloudflare/cloudflared/cmd/cloudflared/config" "github.com/cloudflare/cloudflared/h2mux" "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/validation" + "github.com/pkg/errors" + "github.com/rs/zerolog" "github.com/urfave/cli/v2" ) // StartForwarder starts a client side websocket forward -func StartForwarder(forwarder config.Forwarder, shutdown <-chan struct{}, logger logger.Service) error { +func StartForwarder(forwarder config.Forwarder, shutdown <-chan struct{}, log *zerolog.Logger) error { validURL, err := validation.ValidateUrl(forwarder.Listener) if err != nil { return errors.Wrap(err, "error validating origin URL") @@ -41,9 +42,9 @@ func StartForwarder(forwarder config.Forwarder, shutdown <-chan struct{}, logger } // we could add a cmd line variable for this bool if we want the SOCK5 server to be on the client side - wsConn := carrier.NewWSConnection(logger, false) + wsConn := carrier.NewWSConnection(log, false) - logger.Infof("Start Websocket listener on: %s", validURL.Host) + log.Info().Msgf("Start Websocket listener on: %s", validURL.Host) return carrier.StartForwarder(wsConn, validURL.Host, shutdown, options) } @@ -52,10 +53,7 @@ func StartForwarder(forwarder config.Forwarder, shutdown <-chan struct{}, logger // useful for proxying other protocols (like ssh) over websockets // (which you can put Access in front of) func ssh(c *cli.Context) error { - logger, err := logger.CreateSSHLoggerFromContext(c, logger.EnableTerminalLog) - if err != nil { - return cliutil.PrintLoggerSetupError("error setting up logger", err) - } + log := logger.CreateSSHLoggerFromContext(c, logger.EnableTerminalLog) // get the hostname from the cmdline and error out if its not provided rawHostName := 
c.String(sshHostnameFlag) @@ -85,19 +83,19 @@ func ssh(c *cli.Context) error { } // we could add a cmd line variable for this bool if we want the SOCK5 server to be on the client side - wsConn := carrier.NewWSConnection(logger, false) + wsConn := carrier.NewWSConnection(log, false) if c.NArg() > 0 || c.IsSet(sshURLFlag) { forwarder, err := config.ValidateUrl(c, true) if err != nil { - logger.Errorf("Error validating origin URL: %s", err) + log.Error().Msgf("Error validating origin URL: %s", err) return errors.Wrap(err, "error validating origin URL") } - logger.Infof("Start Websocket listener on: %s", forwarder.Host) + log.Info().Msgf("Start Websocket listener on: %s", forwarder.Host) err = carrier.StartForwarder(wsConn, forwarder.Host, shutdownC, options) if err != nil { - logger.Errorf("Error on Websocket listener: %s", err) + log.Error().Msgf("Error on Websocket listener: %s", err) } return err } diff --git a/cmd/cloudflared/access/cmd.go b/cmd/cloudflared/access/cmd.go index 9ef3abb9..83766597 100644 --- a/cmd/cloudflared/access/cmd.go +++ b/cmd/cloudflared/access/cmd.go @@ -17,11 +17,12 @@ import ( "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/sshgen" "github.com/cloudflare/cloudflared/validation" - "github.com/pkg/errors" - "golang.org/x/net/idna" "github.com/getsentry/raven-go" + "github.com/pkg/errors" + "github.com/rs/zerolog" "github.com/urfave/cli/v2" + "golang.org/x/net/idna" ) const ( @@ -205,20 +206,17 @@ func login(c *cli.Context) error { return err } - logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) - if err != nil { - return errors.Wrap(err, "error setting up logger") - } + log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) args := c.Args() rawURL := ensureURLScheme(args.First()) appURL, err := url.Parse(rawURL) if args.Len() < 1 || err != nil { - logger.Errorf("Please provide the url of the Access application\n") + log.Error().Msgf("Please provide the url of the Access 
application\n") return err } - if err := verifyTokenAtEdge(appURL, c, logger); err != nil { - logger.Errorf("Could not verify token: %s", err) + if err := verifyTokenAtEdge(appURL, c, log); err != nil { + log.Error().Msgf("Could not verify token: %s", err) return err } @@ -250,19 +248,16 @@ func curl(c *cli.Context) error { if err := raven.SetDSN(sentryDSN); err != nil { return err } - logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) - if err != nil { - return errors.Wrap(err, "error setting up logger") - } + log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) args := c.Args() if args.Len() < 1 { - logger.Error("Please provide the access app and command you wish to run.") + log.Error().Msg("Please provide the access app and command you wish to run.") return errors.New("incorrect args") } cmdArgs, allowRequest := parseAllowRequest(args.Slice()) - appURL, err := getAppURL(cmdArgs, logger) + appURL, err := getAppURL(cmdArgs, log) if err != nil { return err } @@ -270,12 +265,12 @@ func curl(c *cli.Context) error { tok, err := token.GetAppTokenIfExists(appURL) if err != nil || tok == "" { if allowRequest { - logger.Info("You don't have an Access token set. Please run access token to fetch one.") + log.Info().Msg("You don't have an Access token set. Please run access token to fetch one.") return shell.Run("curl", cmdArgs...) 
} - tok, err = token.FetchToken(appURL, logger) + tok, err = token.FetchToken(appURL, log) if err != nil { - logger.Errorf("Failed to refresh token: %s", err) + log.Error().Msgf("Failed to refresh token: %s", err) return err } } @@ -329,10 +324,7 @@ func sshConfig(c *cli.Context) error { // sshGen generates a short lived certificate for provided hostname func sshGen(c *cli.Context) error { - logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) - if err != nil { - return errors.Wrap(err, "error setting up logger") - } + log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) // get the hostname from the cmdline and error out if its not provided rawHostName := c.String(sshHostnameFlag) @@ -349,7 +341,7 @@ func sshGen(c *cli.Context) error { // this fetchToken function mutates the appURL param. We should refactor that fetchTokenURL := &url.URL{} *fetchTokenURL = *originURL - cfdToken, err := token.FetchTokenWithRedirect(fetchTokenURL, logger) + cfdToken, err := token.FetchTokenWithRedirect(fetchTokenURL, log) if err != nil { return err } @@ -362,15 +354,15 @@ func sshGen(c *cli.Context) error { } // getAppURL will pull the appURL needed for fetching a user's Access token -func getAppURL(cmdArgs []string, logger logger.Service) (*url.URL, error) { +func getAppURL(cmdArgs []string, log *zerolog.Logger) (*url.URL, error) { if len(cmdArgs) < 1 { - logger.Error("Please provide a valid URL as the first argument to curl.") + log.Error().Msg("Please provide a valid URL as the first argument to curl.") return nil, errors.New("not a valid url") } u, err := processURL(cmdArgs[0]) if err != nil { - logger.Error("Please provide a valid URL as the first argument to curl.") + log.Error().Msg("Please provide a valid URL as the first argument to curl.") return nil, err } @@ -436,7 +428,7 @@ func isFileThere(candidate string) bool { // verifyTokenAtEdge checks for a token on disk, or generates a new one. 
// Then makes a request to to the origin with the token to ensure it is valid. // Returns nil if token is valid. -func verifyTokenAtEdge(appUrl *url.URL, c *cli.Context, logger logger.Service) error { +func verifyTokenAtEdge(appUrl *url.URL, c *cli.Context, log *zerolog.Logger) error { headers := buildRequestHeaders(c.StringSlice(sshHeaderFlag)) if c.IsSet(sshTokenIDFlag) { headers.Add(h2mux.CFAccessClientIDHeader, c.String(sshTokenIDFlag)) @@ -446,7 +438,7 @@ func verifyTokenAtEdge(appUrl *url.URL, c *cli.Context, logger logger.Service) e } options := &carrier.StartOptions{OriginURL: appUrl.String(), Headers: headers} - if valid, err := isTokenValid(options, logger); err != nil { + if valid, err := isTokenValid(options, log); err != nil { return err } else if valid { return nil @@ -456,7 +448,7 @@ func verifyTokenAtEdge(appUrl *url.URL, c *cli.Context, logger logger.Service) e return err } - if valid, err := isTokenValid(options, logger); err != nil { + if valid, err := isTokenValid(options, log); err != nil { return err } else if !valid { return errors.New("failed to verify token") @@ -466,8 +458,8 @@ func verifyTokenAtEdge(appUrl *url.URL, c *cli.Context, logger logger.Service) e } // isTokenValid makes a request to the origin and returns true if the response was not a 302. 
-func isTokenValid(options *carrier.StartOptions, logger logger.Service) (bool, error) { - req, err := carrier.BuildAccessRequest(options, logger) +func isTokenValid(options *carrier.StartOptions, log *zerolog.Logger) (bool, error) { + req, err := carrier.BuildAccessRequest(options, log) if err != nil { return false, errors.Wrap(err, "Could not create access request") } diff --git a/cmd/cloudflared/app_forward_service.go b/cmd/cloudflared/app_forward_service.go index f815b5c7..4ea42dd6 100644 --- a/cmd/cloudflared/app_forward_service.go +++ b/cmd/cloudflared/app_forward_service.go @@ -3,7 +3,8 @@ package main import ( "github.com/cloudflare/cloudflared/cmd/cloudflared/access" "github.com/cloudflare/cloudflared/cmd/cloudflared/config" - "github.com/cloudflare/cloudflared/logger" + + "github.com/rs/zerolog" ) // ForwardServiceType is used to identify what kind of overwatch service this is @@ -15,12 +16,12 @@ const ForwardServiceType = "forward" type ForwarderService struct { forwarder config.Forwarder shutdown chan struct{} - logger logger.Service + log *zerolog.Logger } // NewForwardService creates a new forwarder service -func NewForwardService(f config.Forwarder, logger logger.Service) *ForwarderService { - return &ForwarderService{forwarder: f, shutdown: make(chan struct{}, 1), logger: logger} +func NewForwardService(f config.Forwarder, log *zerolog.Logger) *ForwarderService { + return &ForwarderService{forwarder: f, shutdown: make(chan struct{}, 1), log: log} } // Name is used to figure out this service is related to the others (normally the addr it binds to) @@ -46,5 +47,5 @@ func (s *ForwarderService) Shutdown() { // Run is the run loop that is started by the overwatch service func (s *ForwarderService) Run() error { - return access.StartForwarder(s.forwarder, s.shutdown, s.logger) + return access.StartForwarder(s.forwarder, s.shutdown, s.log) } diff --git a/cmd/cloudflared/app_resolver_service.go b/cmd/cloudflared/app_resolver_service.go index 
1ba19f17..4d26de3b 100644 --- a/cmd/cloudflared/app_resolver_service.go +++ b/cmd/cloudflared/app_resolver_service.go @@ -2,8 +2,9 @@ package main import ( "github.com/cloudflare/cloudflared/cmd/cloudflared/config" - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/tunneldns" + + "github.com/rs/zerolog" ) // ResolverServiceType is used to identify what kind of overwatch service this is @@ -15,14 +16,14 @@ const ResolverServiceType = "resolver" type ResolverService struct { resolver config.DNSResolver shutdown chan struct{} - logger logger.Service + log *zerolog.Logger } // NewResolverService creates a new resolver service -func NewResolverService(r config.DNSResolver, logger logger.Service) *ResolverService { +func NewResolverService(r config.DNSResolver, log *zerolog.Logger) *ResolverService { return &ResolverService{resolver: r, shutdown: make(chan struct{}), - logger: logger, + log: log, } } @@ -51,7 +52,7 @@ func (s *ResolverService) Shutdown() { func (s *ResolverService) Run() error { // create a listener l, err := tunneldns.CreateListener(s.resolver.AddressOrDefault(), s.resolver.PortOrDefault(), - s.resolver.UpstreamsOrDefault(), s.resolver.BootstrapsOrDefault(), s.logger) + s.resolver.UpstreamsOrDefault(), s.resolver.BootstrapsOrDefault(), s.log) if err != nil { return err } @@ -60,14 +61,14 @@ func (s *ResolverService) Run() error { readySignal := make(chan struct{}) err = l.Start(readySignal) if err != nil { - l.Stop() + _ = l.Stop() return err } <-readySignal - s.logger.Infof("start resolver on: %s:%d", s.resolver.AddressOrDefault(), s.resolver.PortOrDefault()) + s.log.Info().Msgf("start resolver on: %s:%d", s.resolver.AddressOrDefault(), s.resolver.PortOrDefault()) // wait for shutdown signal <-s.shutdown - s.logger.Infof("shutdown on: %s:%d", s.resolver.AddressOrDefault(), s.resolver.PortOrDefault()) + s.log.Info().Msgf("shutdown on: %s:%d", s.resolver.AddressOrDefault(), s.resolver.PortOrDefault()) return l.Stop() } diff 
--git a/cmd/cloudflared/app_service.go b/cmd/cloudflared/app_service.go index 5f5197d0..8289959e 100644 --- a/cmd/cloudflared/app_service.go +++ b/cmd/cloudflared/app_service.go @@ -2,8 +2,9 @@ package main import ( "github.com/cloudflare/cloudflared/cmd/cloudflared/config" - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/overwatch" + + "github.com/rs/zerolog" ) // AppService is the main service that runs when no command lines flags are passed to cloudflared @@ -13,17 +14,17 @@ type AppService struct { serviceManager overwatch.Manager shutdownC chan struct{} configUpdateChan chan config.Root - logger logger.Service + log *zerolog.Logger } // NewAppService creates a new AppService with needed supporting services -func NewAppService(configManager config.Manager, serviceManager overwatch.Manager, shutdownC chan struct{}, logger logger.Service) *AppService { +func NewAppService(configManager config.Manager, serviceManager overwatch.Manager, shutdownC chan struct{}, log *zerolog.Logger) *AppService { return &AppService{ configManager: configManager, serviceManager: serviceManager, shutdownC: shutdownC, configUpdateChan: make(chan config.Root), - logger: logger, + log: log, } } @@ -67,14 +68,14 @@ func (s *AppService) handleConfigUpdate(c config.Root) { // handle the client forward listeners activeServices := map[string]struct{}{} for _, f := range c.Forwarders { - service := NewForwardService(f, s.logger) + service := NewForwardService(f, s.log) s.serviceManager.Add(service) activeServices[service.Name()] = struct{}{} } // handle resolver changes if c.Resolver.Enabled { - service := NewResolverService(c.Resolver, s.logger) + service := NewResolverService(c.Resolver, s.log) s.serviceManager.Add(service) activeServices[service.Name()] = struct{}{} diff --git a/cmd/cloudflared/buildinfo/build_info.go b/cmd/cloudflared/buildinfo/build_info.go index aedface1..05d38fda 100644 --- a/cmd/cloudflared/buildinfo/build_info.go +++ 
b/cmd/cloudflared/buildinfo/build_info.go @@ -1,9 +1,8 @@ package buildinfo import ( + "github.com/rs/zerolog" "runtime" - - "github.com/cloudflare/cloudflared/logger" ) type BuildInfo struct { @@ -22,7 +21,7 @@ func GetBuildInfo(cloudflaredVersion string) *BuildInfo { } } -func (bi *BuildInfo) Log(logger logger.Service) { - logger.Infof("Version %s", bi.CloudflaredVersion) - logger.Infof("GOOS: %s, GOVersion: %s, GoArch: %s", bi.GoOS, bi.GoVersion, bi.GoArch) +func (bi *BuildInfo) Log(log *zerolog.Logger) { + log.Info().Msgf("Version %s", bi.CloudflaredVersion) + log.Info().Msgf("GOOS: %s, GOVersion: %s, GoArch: %s", bi.GoOS, bi.GoVersion, bi.GoArch) } diff --git a/cmd/cloudflared/cliutil/errors.go b/cmd/cloudflared/cliutil/errors.go index 902ad2ff..60ca40b5 100644 --- a/cmd/cloudflared/cliutil/errors.go +++ b/cmd/cloudflared/cliutil/errors.go @@ -2,10 +2,6 @@ package cliutil import ( "fmt" - "log" - - "github.com/cloudflare/cloudflared/logger" - "github.com/pkg/errors" "github.com/urfave/cli/v2" ) @@ -27,8 +23,6 @@ func UsageError(format string, args ...interface{}) error { // Ensures exit with error code if actionFunc returns an error func ErrorHandler(actionFunc cli.ActionFunc) cli.ActionFunc { return func(ctx *cli.Context) error { - defer logger.SharedWriteManager.Shutdown() - err := actionFunc(ctx) if err != nil { if _, ok := err.(usageError); ok { @@ -41,15 +35,3 @@ func ErrorHandler(actionFunc cli.ActionFunc) cli.ActionFunc { return err } } - -// PrintLoggerSetupError returns an error to stdout to notify when a logger can't start -func PrintLoggerSetupError(msg string, err error) error { - l, le := logger.New() - if le != nil { - log.Printf("%s: %s", msg, err) - } else { - l.Errorf("%s: %s", msg, err) - } - - return errors.Wrap(err, msg) -} diff --git a/cmd/cloudflared/config/configuration.go b/cmd/cloudflared/config/configuration.go index ce27505e..035442be 100644 --- a/cmd/cloudflared/config/configuration.go +++ b/cmd/cloudflared/config/configuration.go 
@@ -9,13 +9,13 @@ import ( "runtime" "time" - homedir "github.com/mitchellh/go-homedir" + "github.com/mitchellh/go-homedir" "github.com/pkg/errors" "github.com/urfave/cli/v2" "gopkg.in/yaml.v2" - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/validation" + "github.com/rs/zerolog" ) var ( @@ -95,7 +95,7 @@ func FileExists(path string) (bool, error) { } return false, err } - f.Close() + _ = f.Close() return true, nil } @@ -138,7 +138,7 @@ func FindOrCreateConfigPath() string { defer file.Close() logDir := DefaultLogDirectory() - os.MkdirAll(logDir, os.ModePerm) //try and create it. Doesn't matter if it succeed or not, only byproduct will be no logs + _ = os.MkdirAll(logDir, os.ModePerm) //try and create it. Doesn't matter if it succeed or not, only byproduct will be no logs c := Root{ LogDirectory: logDir, @@ -345,7 +345,7 @@ func GetConfiguration() *Configuration { // ReadConfigFile returns InputSourceContext initialized from the configuration file. // On repeat calls returns with the same file, returns without reading the file again; however, // if value of "config" flag changes, will read the new config file -func ReadConfigFile(c *cli.Context, log logger.Service) (*configFileSettings, error) { +func ReadConfigFile(c *cli.Context, log *zerolog.Logger) (*configFileSettings, error) { configFile := c.String("config") if configuration.Source() == configFile || configFile == "" { if configuration.Source() == "" { @@ -354,7 +354,7 @@ func ReadConfigFile(c *cli.Context, log logger.Service) (*configFileSettings, er return &configuration, nil } - log.Debugf("Loading configuration from %s", configFile) + log.Debug().Msgf("Loading configuration from %s", configFile) file, err := os.Open(configFile) if err != nil { if os.IsNotExist(err) { @@ -365,7 +365,7 @@ func ReadConfigFile(c *cli.Context, log logger.Service) (*configFileSettings, er defer file.Close() if err := yaml.NewDecoder(file).Decode(&configuration); err != nil { if err == io.EOF { - 
log.Errorf("Configuration file %s was empty", configFile) + log.Error().Msgf("Configuration file %s was empty", configFile) return &configuration, nil } return nil, errors.Wrap(err, "error parsing YAML in config file at "+configFile) diff --git a/cmd/cloudflared/config/manager.go b/cmd/cloudflared/config/manager.go index d299c06a..a3a28d96 100644 --- a/cmd/cloudflared/config/manager.go +++ b/cmd/cloudflared/config/manager.go @@ -4,9 +4,10 @@ import ( "io" "os" - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/watcher" + "github.com/pkg/errors" + "github.com/rs/zerolog" "gopkg.in/yaml.v2" ) @@ -27,16 +28,16 @@ type FileManager struct { watcher watcher.Notifier notifier Notifier configPath string - logger logger.Service - ReadConfig func(string, logger.Service) (Root, error) + log *zerolog.Logger + ReadConfig func(string, *zerolog.Logger) (Root, error) } // NewFileManager creates a config manager -func NewFileManager(watcher watcher.Notifier, configPath string, logger logger.Service) (*FileManager, error) { +func NewFileManager(watcher watcher.Notifier, configPath string, log *zerolog.Logger) (*FileManager, error) { m := &FileManager{ watcher: watcher, configPath: configPath, - logger: logger, + log: log, ReadConfig: readConfigFromPath, } err := watcher.Add(configPath) @@ -60,7 +61,7 @@ func (m *FileManager) Start(notifier Notifier) error { // GetConfig reads the yaml file from the disk func (m *FileManager) GetConfig() (Root, error) { - return m.ReadConfig(m.configPath, m.logger) + return m.ReadConfig(m.configPath, m.log) } // Shutdown stops the watcher @@ -68,7 +69,7 @@ func (m *FileManager) Shutdown() { m.watcher.Shutdown() } -func readConfigFromPath(configPath string, log logger.Service) (Root, error) { +func readConfigFromPath(configPath string, log *zerolog.Logger) (Root, error) { if configPath == "" { return Root{}, errors.New("unable to find config file") } @@ -82,7 +83,7 @@ func readConfigFromPath(configPath string, log 
logger.Service) (Root, error) { var config Root if err := yaml.NewDecoder(file).Decode(&config); err != nil { if err == io.EOF { - log.Errorf("Configuration file %s was empty", configPath) + log.Error().Msgf("Configuration file %s was empty", configPath) return Root{}, nil } return Root{}, errors.Wrap(err, "error parsing YAML in config file at "+configPath) @@ -98,14 +99,14 @@ func readConfigFromPath(configPath string, log logger.Service) (Root, error) { func (m *FileManager) WatcherItemDidChange(filepath string) { config, err := m.GetConfig() if err != nil { - m.logger.Errorf("Failed to read new config: %s", err) + m.log.Error().Msgf("Failed to read new config: %s", err) return } - m.logger.Info("Config file has been updated") + m.log.Info().Msg("Config file has been updated") m.notifier.ConfigDidUpdate(config) } // WatcherDidError notifies of errors with the file watcher func (m *FileManager) WatcherDidError(err error) { - m.logger.Errorf("Config watcher encountered an error: %s", err) + m.log.Error().Msgf("Config watcher encountered an error: %s", err) } diff --git a/cmd/cloudflared/config/manager_test.go b/cmd/cloudflared/config/manager_test.go index 116f5a21..ef752971 100644 --- a/cmd/cloudflared/config/manager_test.go +++ b/cmd/cloudflared/config/manager_test.go @@ -4,10 +4,10 @@ import ( "os" "testing" - "github.com/stretchr/testify/assert" - - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/watcher" + + "github.com/rs/zerolog" + "github.com/stretchr/testify/assert" ) type mockNotifier struct { @@ -46,8 +46,8 @@ func TestConfigChanged(t *testing.T) { f, err := os.Create(filePath) assert.NoError(t, err) defer func() { - f.Close() - os.Remove(filePath) + _ = f.Close() + _ = os.Remove(filePath) }() c := &Root{ Forwarders: []Forwarder{ @@ -57,15 +57,15 @@ func TestConfigChanged(t *testing.T) { }, }, } - configRead := func(configPath string, log logger.Service) (Root, error) { + configRead := func(configPath string, log 
*zerolog.Logger) (Root, error) { return *c, nil } wait := make(chan struct{}) w := &mockFileWatcher{path: filePath, ready: wait} - logger := logger.NewOutputWriter(logger.NewMockWriteManager()) + log := zerolog.Nop() - service, err := NewFileManager(w, filePath, logger) + service, err := NewFileManager(w, filePath, &log) service.ReadConfig = configRead assert.NoError(t, err) diff --git a/cmd/cloudflared/linux_service.go b/cmd/cloudflared/linux_service.go index 4fd34cfc..76e6fb7b 100644 --- a/cmd/cloudflared/linux_service.go +++ b/cmd/cloudflared/linux_service.go @@ -7,13 +7,13 @@ import ( "os" "path/filepath" - "github.com/pkg/errors" - cli "github.com/urfave/cli/v2" - "github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil" "github.com/cloudflare/cloudflared/cmd/cloudflared/config" "github.com/cloudflare/cloudflared/cmd/cloudflared/tunnel" "github.com/cloudflare/cloudflared/logger" + + "github.com/rs/zerolog" + "github.com/urfave/cli/v2" ) func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) { @@ -21,7 +21,7 @@ func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) { Name: "service", Usage: "Manages the Argo Tunnel system service", Subcommands: []*cli.Command{ - &cli.Command{ + { Name: "install", Usage: "Install Argo Tunnel as a system service", Action: cliutil.ErrorHandler(installLinuxService), @@ -32,7 +32,7 @@ func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) { }, }, }, - &cli.Command{ + { Name: "uninstall", Usage: "Uninstall the Argo Tunnel service", Action: cliutil.ErrorHandler(uninstallLinuxService), @@ -190,7 +190,7 @@ func isSystemd() bool { return false } -func copyUserConfiguration(userConfigDir, userConfigFile, userCredentialFile string, logger logger.Service) error { +func copyUserConfiguration(userConfigDir, userConfigFile, userCredentialFile string, log *zerolog.Logger) error { srcCredentialPath := filepath.Join(userConfigDir, userCredentialFile) destCredentialPath := filepath.Join(serviceConfigDir, 
serviceCredentialFile) if srcCredentialPath != destCredentialPath { @@ -205,17 +205,14 @@ func copyUserConfiguration(userConfigDir, userConfigFile, userCredentialFile str if err := copyConfig(srcConfigPath, destConfigPath); err != nil { return err } - logger.Infof("Copied %s to %s", srcConfigPath, destConfigPath) + log.Info().Msgf("Copied %s to %s", srcConfigPath, destConfigPath) } return nil } func installLinuxService(c *cli.Context) error { - logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) - if err != nil { - return errors.Wrap(err, "error setting up logger") - } + log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) etPath, err := os.Executable() if err != nil { @@ -232,8 +229,8 @@ func installLinuxService(c *cli.Context) error { userConfigDir := filepath.Dir(c.String("config")) userConfigFile := filepath.Base(c.String("config")) userCredentialFile := config.DefaultCredentialFile - if err = copyUserConfiguration(userConfigDir, userConfigFile, userCredentialFile, logger); err != nil { - logger.Errorf("Failed to copy user configuration: %s. Before running the service, ensure that %s contains two files, %s and %s", err, + if err = copyUserConfiguration(userConfigDir, userConfigFile, userCredentialFile, log); err != nil { + log.Error().Msgf("Failed to copy user configuration: %s. 
Before running the service, ensure that %s contains two files, %s and %s", err, serviceConfigDir, serviceCredentialFile, serviceConfigFile) return err } @@ -241,7 +238,7 @@ func installLinuxService(c *cli.Context) error { "--origincert", serviceConfigDir + "/" + serviceCredentialFile, } } else { - src, err := config.ReadConfigFile(c, logger) + src, err := config.ReadConfigFile(c, log) if err != nil { return err } @@ -274,42 +271,42 @@ credentials-file: CREDENTIALS-FILE switch { case isSystemd(): - logger.Infof("Using Systemd") - return installSystemd(&templateArgs, logger) + log.Info().Msgf("Using Systemd") + return installSystemd(&templateArgs, log) default: - logger.Infof("Using SysV") - return installSysv(&templateArgs, logger) + log.Info().Msgf("Using SysV") + return installSysv(&templateArgs, log) } } -func installSystemd(templateArgs *ServiceTemplateArgs, logger logger.Service) error { +func installSystemd(templateArgs *ServiceTemplateArgs, log *zerolog.Logger) error { for _, serviceTemplate := range systemdTemplates { err := serviceTemplate.Generate(templateArgs) if err != nil { - logger.Errorf("error generating service template: %s", err) + log.Error().Msgf("error generating service template: %s", err) return err } } if err := runCommand("systemctl", "enable", "cloudflared.service"); err != nil { - logger.Errorf("systemctl enable cloudflared.service error: %s", err) + log.Error().Msgf("systemctl enable cloudflared.service error: %s", err) return err } if err := runCommand("systemctl", "start", "cloudflared-update.timer"); err != nil { - logger.Errorf("systemctl start cloudflared-update.timer error: %s", err) + log.Error().Msgf("systemctl start cloudflared-update.timer error: %s", err) return err } - logger.Infof("systemctl daemon-reload") + log.Info().Msgf("systemctl daemon-reload") return runCommand("systemctl", "daemon-reload") } -func installSysv(templateArgs *ServiceTemplateArgs, logger logger.Service) error { +func installSysv(templateArgs 
*ServiceTemplateArgs, log *zerolog.Logger) error { confPath, err := sysvTemplate.ResolvePath() if err != nil { - logger.Errorf("error resolving system path: %s", err) + log.Error().Msgf("error resolving system path: %s", err) return err } if err := sysvTemplate.Generate(templateArgs); err != nil { - logger.Errorf("error generating system template: %s", err) + log.Error().Msgf("error generating system template: %s", err) return err } for _, i := range [...]string{"2", "3", "4", "5"} { @@ -326,43 +323,40 @@ func installSysv(templateArgs *ServiceTemplateArgs, logger logger.Service) error } func uninstallLinuxService(c *cli.Context) error { - logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) - if err != nil { - return errors.Wrap(err, "error setting up logger") - } + log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) switch { case isSystemd(): - logger.Infof("Using Systemd") - return uninstallSystemd(logger) + log.Info().Msgf("Using Systemd") + return uninstallSystemd(log) default: - logger.Infof("Using SysV") - return uninstallSysv(logger) + log.Info().Msgf("Using SysV") + return uninstallSysv(log) } } -func uninstallSystemd(logger logger.Service) error { +func uninstallSystemd(log *zerolog.Logger) error { if err := runCommand("systemctl", "disable", "cloudflared.service"); err != nil { - logger.Errorf("systemctl disable cloudflared.service error: %s", err) + log.Error().Msgf("systemctl disable cloudflared.service error: %s", err) return err } if err := runCommand("systemctl", "stop", "cloudflared-update.timer"); err != nil { - logger.Errorf("systemctl stop cloudflared-update.timer error: %s", err) + log.Error().Msgf("systemctl stop cloudflared-update.timer error: %s", err) return err } for _, serviceTemplate := range systemdTemplates { if err := serviceTemplate.Remove(); err != nil { - logger.Errorf("error removing service template: %s", err) + log.Error().Msgf("error removing service template: %s", err) return err } } - 
logger.Infof("Successfully uninstall cloudflared service") + log.Info().Msgf("Successfully uninstall cloudflared service") return nil } -func uninstallSysv(logger logger.Service) error { +func uninstallSysv(log *zerolog.Logger) error { if err := sysvTemplate.Remove(); err != nil { - logger.Errorf("error removing service template: %s", err) + log.Error().Msgf("error removing service template: %s", err) return err } for _, i := range [...]string{"2", "3", "4", "5"} { @@ -375,6 +369,6 @@ func uninstallSysv(logger logger.Service) error { continue } } - logger.Infof("Successfully uninstall cloudflared service") + log.Info().Msgf("Successfully uninstall cloudflared service") return nil } diff --git a/cmd/cloudflared/macos_service.go b/cmd/cloudflared/macos_service.go index 0970fa04..1175e3ee 100644 --- a/cmd/cloudflared/macos_service.go +++ b/cmd/cloudflared/macos_service.go @@ -34,7 +34,7 @@ func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) { }, }, }) - app.Run(os.Args) + _ = app.Run(os.Args) } func newLaunchdTemplate(installPath, stdoutPath, stderrPath string) *ServiceTemplate { @@ -107,71 +107,61 @@ func stderrPath() (string, error) { } func installLaunchd(c *cli.Context) error { - logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) - if err != nil { - return errors.Wrap(err, "error setting up logger") - } + log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) if isRootUser() { - logger.Infof("Installing Argo Tunnel client as a system launch daemon. " + + log.Info().Msgf("Installing Argo Tunnel client as a system launch daemon. " + "Argo Tunnel client will run at boot") } else { - logger.Infof("Installing Argo Tunnel client as an user launch agent. " + + log.Info().Msgf("Installing Argo Tunnel client as an user launch agent. " + "Note that Argo Tunnel client will only run when the user is logged in. " + "If you want to run Argo Tunnel client at boot, install with root permission. 
" + "For more information, visit https://developers.cloudflare.com/argo-tunnel/reference/service/") } etPath, err := os.Executable() if err != nil { - logger.Errorf("Error determining executable path: %s", err) + log.Error().Msgf("Error determining executable path: %s", err) return fmt.Errorf("Error determining executable path: %v", err) } installPath, err := installPath() if err != nil { - logger.Errorf("Error determining install path: %s", err) + log.Error().Msgf("Error determining install path: %s", err) return errors.Wrap(err, "Error determining install path") } stdoutPath, err := stdoutPath() if err != nil { - logger.Errorf("error determining stdout path: %s", err) + log.Error().Msgf("error determining stdout path: %s", err) return errors.Wrap(err, "error determining stdout path") } stderrPath, err := stderrPath() if err != nil { - logger.Errorf("error determining stderr path: %s", err) + log.Error().Msgf("error determining stderr path: %s", err) return errors.Wrap(err, "error determining stderr path") } launchdTemplate := newLaunchdTemplate(installPath, stdoutPath, stderrPath) - if err != nil { - logger.Errorf("error creating launchd template: %s", err) - return errors.Wrap(err, "error creating launchd template") - } templateArgs := ServiceTemplateArgs{Path: etPath} err = launchdTemplate.Generate(&templateArgs) if err != nil { - logger.Errorf("error generating launchd template: %s", err) + log.Error().Msgf("error generating launchd template: %s", err) return err } plistPath, err := launchdTemplate.ResolvePath() if err != nil { - logger.Errorf("error resolving launchd template path: %s", err) + log.Error().Msgf("error resolving launchd template path: %s", err) return err } - logger.Infof("Outputs are logged to %s and %s", stderrPath, stdoutPath) + log.Info().Msgf("Outputs are logged to %s and %s", stderrPath, stdoutPath) return runCommand("launchctl", "load", plistPath) } func uninstallLaunchd(c *cli.Context) error { - logger, err := 
logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) - if err != nil { - return errors.Wrap(err, "error setting up logger") - } + log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) if isRootUser() { - logger.Infof("Uninstalling Argo Tunnel as a system launch daemon") + log.Info().Msgf("Uninstalling Argo Tunnel as a system launch daemon") } else { - logger.Infof("Uninstalling Argo Tunnel as an user launch agent") + log.Info().Msgf("Uninstalling Argo Tunnel as an user launch agent") } installPath, err := installPath() if err != nil { @@ -186,20 +176,17 @@ func uninstallLaunchd(c *cli.Context) error { return errors.Wrap(err, "error determining stderr path") } launchdTemplate := newLaunchdTemplate(installPath, stdoutPath, stderrPath) - if err != nil { - return errors.Wrap(err, "error creating launchd template") - } plistPath, err := launchdTemplate.ResolvePath() if err != nil { - logger.Errorf("error resolving launchd template path: %s", err) + log.Error().Msgf("error resolving launchd template path: %s", err) return err } err = runCommand("launchctl", "unload", plistPath) if err != nil { - logger.Errorf("error unloading: %s", err) + log.Error().Msgf("error unloading: %s", err) return err } - logger.Infof("Outputs are logged to %s and %s", stderrPath, stdoutPath) + log.Info().Msgf("Outputs are logged to %s and %s", stderrPath, stdoutPath) return launchdTemplate.Remove() } diff --git a/cmd/cloudflared/main.go b/cmd/cloudflared/main.go index 0be2952c..03265158 100644 --- a/cmd/cloudflared/main.go +++ b/cmd/cloudflared/main.go @@ -10,16 +10,16 @@ import ( "github.com/cloudflare/cloudflared/cmd/cloudflared/config" "github.com/cloudflare/cloudflared/cmd/cloudflared/tunnel" "github.com/cloudflare/cloudflared/cmd/cloudflared/updater" - log "github.com/cloudflare/cloudflared/logger" + "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/metrics" "github.com/cloudflare/cloudflared/overwatch" 
"github.com/cloudflare/cloudflared/tunneldns" "github.com/cloudflare/cloudflared/watcher" + "github.com/getsentry/raven-go" "github.com/mitchellh/go-homedir" - "github.com/urfave/cli/v2" - "github.com/pkg/errors" + "github.com/urfave/cli/v2" ) const ( @@ -184,38 +184,33 @@ func captureError(err error) { // cloudflared was started without any flags func handleServiceMode(c *cli.Context, shutdownC chan struct{}) error { - defer log.SharedWriteManager.Shutdown() - - logger, err := log.CreateLoggerFromContext(c, log.DisableTerminalLog) - if err != nil { - return cliutil.PrintLoggerSetupError("error setting up logger", err) - } + log := logger.CreateLoggerFromContext(c, logger.DisableTerminalLog) // start the main run loop that reads from the config file f, err := watcher.NewFile() if err != nil { - logger.Errorf("Cannot load config file: %s", err) + log.Error().Msgf("Cannot load config file: %s", err) return err } configPath := config.FindOrCreateConfigPath() - configManager, err := config.NewFileManager(f, configPath, logger) + configManager, err := config.NewFileManager(f, configPath, log) if err != nil { - logger.Errorf("Cannot setup config file for monitoring: %s", err) + log.Error().Msgf("Cannot setup config file for monitoring: %s", err) return err } - logger.Infof("monitoring config file at: %s", configPath) + log.Info().Msgf("monitoring config file at: %s", configPath) serviceCallback := func(t string, name string, err error) { if err != nil { - logger.Errorf("%s service: %s encountered an error: %s", t, name, err) + log.Error().Msgf("%s service: %s encountered an error: %s", t, name, err) } } serviceManager := overwatch.NewAppManager(serviceCallback) - appService := NewAppService(configManager, serviceManager, shutdownC, logger) + appService := NewAppService(configManager, serviceManager, shutdownC, log) if err := appService.Run(); err != nil { - logger.Errorf("Failed to start app service: %s", err) + log.Error().Msgf("Failed to start app service: %s", err) 
return err } return nil diff --git a/cmd/cloudflared/token/token.go b/cmd/cloudflared/token/token.go index f59b5a5e..ed1a90ba 100644 --- a/cmd/cloudflared/token/token.go +++ b/cmd/cloudflared/token/token.go @@ -16,10 +16,11 @@ import ( "github.com/cloudflare/cloudflared/cmd/cloudflared/config" "github.com/cloudflare/cloudflared/cmd/cloudflared/path" "github.com/cloudflare/cloudflared/cmd/cloudflared/transfer" - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/origin" + "github.com/coreos/go-oidc/jose" "github.com/pkg/errors" + "github.com/rs/zerolog" ) const ( @@ -97,7 +98,7 @@ func newLock(path string) *lock { func (l *lock) Acquire() error { // Intercept SIGINT and SIGTERM to release lock before exiting l.sigHandler.register(func() { - l.deleteLockFile() + _ = l.deleteLockFile() os.Exit(0) }) @@ -143,18 +144,18 @@ func isTokenLocked(lockFilePath string) bool { // FetchTokenWithRedirect will either load a stored token or generate a new one // it appends the full url as the redirect URL to the access cli request if opening the browser -func FetchTokenWithRedirect(appURL *url.URL, logger logger.Service) (string, error) { - return getToken(appURL, false, logger) +func FetchTokenWithRedirect(appURL *url.URL, log *zerolog.Logger) (string, error) { + return getToken(appURL, false, log) } // FetchToken will either load a stored token or generate a new one // it appends the host of the appURL as the redirect URL to the access cli request if opening the browser -func FetchToken(appURL *url.URL, logger logger.Service) (string, error) { - return getToken(appURL, true, logger) +func FetchToken(appURL *url.URL, log *zerolog.Logger) (string, error) { + return getToken(appURL, true, log) } // getToken will either load a stored token or generate a new one -func getToken(appURL *url.URL, useHostOnly bool, logger logger.Service) (string, error) { +func getToken(appURL *url.URL, useHostOnly bool, log *zerolog.Logger) (string, error) { if token, err := 
GetAppTokenIfExists(appURL); token != "" && err == nil { return token, nil } @@ -179,7 +180,7 @@ func getToken(appURL *url.URL, useHostOnly bool, logger logger.Service) (string, var orgTokenPath string // Get auth domain to format into org token file path if authDomain, err := getAuthDomain(appURL); err != nil { - logger.Errorf("failed to get auth domain: %s", err) + log.Error().Msgf("failed to get auth domain: %s", err) } else { orgToken, err := GetOrgTokenIfExists(authDomain) if err != nil { @@ -198,7 +199,7 @@ func getToken(appURL *url.URL, useHostOnly bool, logger logger.Service) (string, } if err == nil { if appToken, err := exchangeOrgToken(appURL, orgToken); err != nil { - logger.Debugf("failed to exchange org token for app token: %s", err) + log.Debug().Msgf("failed to exchange org token for app token: %s", err) } else { if err := ioutil.WriteFile(appTokenPath, []byte(appToken), 0600); err != nil { return "", errors.Wrap(err, "failed to write app token to disk") @@ -207,19 +208,19 @@ func getToken(appURL *url.URL, useHostOnly bool, logger logger.Service) (string, } } } - return getTokensFromEdge(appURL, appTokenPath, orgTokenPath, useHostOnly, logger) + return getTokensFromEdge(appURL, appTokenPath, orgTokenPath, useHostOnly, log) } // getTokensFromEdge will attempt to use the transfer service to retrieve an app and org token, save them to disk, // and return the app token. -func getTokensFromEdge(appURL *url.URL, appTokenPath, orgTokenPath string, useHostOnly bool, logger logger.Service) (string, error) { +func getTokensFromEdge(appURL *url.URL, appTokenPath, orgTokenPath string, useHostOnly bool, log *zerolog.Logger) (string, error) { // If no org token exists or if it couldnt be exchanged for an app token, then run the transfer service flow. // this weird parameter is the resource name (token) and the key/value // we want to send to the transfer service. 
the key is token and the value // is blank (basically just the id generated in the transfer service) - resourceData, err := transfer.Run(appURL, keyName, keyName, "", true, useHostOnly, logger) + resourceData, err := transfer.Run(appURL, keyName, keyName, "", true, useHostOnly, log) if err != nil { return "", errors.Wrap(err, "failed to run transfer service") } diff --git a/cmd/cloudflared/transfer/transfer.go b/cmd/cloudflared/transfer/transfer.go index f4e08016..3037a4b2 100644 --- a/cmd/cloudflared/transfer/transfer.go +++ b/cmd/cloudflared/transfer/transfer.go @@ -12,8 +12,8 @@ import ( "github.com/cloudflare/cloudflared/cmd/cloudflared/encrypter" "github.com/cloudflare/cloudflared/cmd/cloudflared/shell" - "github.com/cloudflare/cloudflared/logger" "github.com/pkg/errors" + "github.com/rs/zerolog" ) const ( @@ -27,7 +27,7 @@ const ( // The "dance" we refer to is building a HTTP request, opening that in a browser waiting for // the user to complete an action, while it long polls in the background waiting for an // action to be completed to download the resource. 
-func Run(transferURL *url.URL, resourceName, key, value string, shouldEncrypt bool, useHostOnly bool, logger logger.Service) ([]byte, error) { +func Run(transferURL *url.URL, resourceName, key, value string, shouldEncrypt bool, useHostOnly bool, log *zerolog.Logger) ([]byte, error) { encrypterClient, err := encrypter.New("cloudflared_priv.pem", "cloudflared_pub.pem") if err != nil { return nil, err @@ -48,7 +48,7 @@ func Run(transferURL *url.URL, resourceName, key, value string, shouldEncrypt bo var resourceData []byte if shouldEncrypt { - buf, key, err := transferRequest(baseStoreURL+"transfer/"+encrypterClient.PublicKey(), logger) + buf, key, err := transferRequest(baseStoreURL+"transfer/"+encrypterClient.PublicKey(), log) if err != nil { return nil, err } @@ -64,7 +64,7 @@ func Run(transferURL *url.URL, resourceName, key, value string, shouldEncrypt bo resourceData = decrypted } else { - buf, _, err := transferRequest(baseStoreURL+encrypterClient.PublicKey(), logger) + buf, _, err := transferRequest(baseStoreURL+encrypterClient.PublicKey(), log) if err != nil { return nil, err } @@ -96,17 +96,17 @@ func buildRequestURL(baseURL *url.URL, key, value string, cli, useHostOnly bool) } // transferRequest downloads the requested resource from the request URL -func transferRequest(requestURL string, logger logger.Service) ([]byte, string, error) { +func transferRequest(requestURL string, log *zerolog.Logger) ([]byte, string, error) { client := &http.Client{Timeout: clientTimeout} const pollAttempts = 10 // we do "long polling" on the endpoint to get the resource. 
for i := 0; i < pollAttempts; i++ { - buf, key, err := poll(client, requestURL, logger) + buf, key, err := poll(client, requestURL, log) if err != nil { return nil, "", err } else if len(buf) > 0 { if err := putSuccess(client, requestURL); err != nil { - logger.Errorf("Failed to update resource success: %s", err) + log.Error().Msgf("Failed to update resource success: %s", err) } return buf, key, nil } @@ -115,7 +115,7 @@ func transferRequest(requestURL string, logger logger.Service) ([]byte, string, } // poll the endpoint for the request resource, waiting for the user interaction -func poll(client *http.Client, requestURL string, logger logger.Service) ([]byte, string, error) { +func poll(client *http.Client, requestURL string, log *zerolog.Logger) ([]byte, string, error) { resp, err := client.Get(requestURL) if err != nil { return nil, "", err @@ -128,7 +128,7 @@ func poll(client *http.Client, requestURL string, logger logger.Service) ([]byte return nil, "", fmt.Errorf("error on request %d", resp.StatusCode) } if resp.StatusCode != 200 { - logger.Info("Waiting for login...") + log.Info().Msg("Waiting for login...") return nil, "", nil } diff --git a/cmd/cloudflared/tunnel/cmd.go b/cmd/cloudflared/tunnel/cmd.go index aeb80747..8994e3ad 100644 --- a/cmd/cloudflared/tunnel/cmd.go +++ b/cmd/cloudflared/tunnel/cmd.go @@ -35,6 +35,7 @@ import ( "github.com/google/uuid" "github.com/mitchellh/go-homedir" "github.com/pkg/errors" + "github.com/rs/zerolog" "github.com/urfave/cli/v2" "github.com/urfave/cli/v2/altsrc" ) @@ -42,8 +43,6 @@ import ( const ( sentryDSN = "https://56a9c9fa5c364ab28f34b14f35ea0f1b:3e8827f6f9f740738eb11138f7bebb68@sentry.io/189878" - sshLogFileDirectory = "/usr/local/var/log/cloudflared/" - // sshPortFlag is the port on localhost the cloudflared ssh server will run on sshPortFlag = "local-ssh-port" @@ -174,14 +173,14 @@ func runAdhocNamedTunnel(sc *subcommandContext, name string) error { return errors.Wrap(err, "failed to create tunnel") } } else { - 
sc.logger.Infof("Tunnel already created with ID %s", tunnel.ID) + sc.log.Info().Msgf("Tunnel already created with ID %s", tunnel.ID) } if r, ok := routeFromFlag(sc.c); ok { if res, err := sc.route(tunnel.ID, r); err != nil { - sc.logger.Errorf("failed to create route, please create it manually. err: %v.", err) + sc.log.Error().Msgf("failed to create route, please create it manually. err: %v.", err) } else { - sc.logger.Infof(res.SuccessSummary()) + sc.log.Info().Msgf(res.SuccessSummary()) } } @@ -194,7 +193,7 @@ func runAdhocNamedTunnel(sc *subcommandContext, name string) error { // runClassicTunnel creates a "classic" non-named tunnel func runClassicTunnel(sc *subcommandContext) error { - return StartServer(sc.c, version, shutdownC, graceShutdownC, nil, sc.logger, sc.isUIEnabled) + return StartServer(sc.c, version, shutdownC, graceShutdownC, nil, sc.log, sc.isUIEnabled) } func routeFromFlag(c *cli.Context) (tunnelstore.Route, bool) { @@ -213,7 +212,7 @@ func StartServer( shutdownC, graceShutdownC chan struct{}, namedTunnel *connection.NamedTunnelConfig, - generalLogger logger.Service, + log *zerolog.Logger, isUIEnabled bool, ) error { _ = raven.SetDSN(sentryDSN) @@ -224,45 +223,45 @@ func StartServer( dnsReadySignal := make(chan struct{}) if config.GetConfiguration().Source() == "" { - generalLogger.Infof(config.ErrNoConfigFile.Error()) + log.Info().Msg(config.ErrNoConfigFile.Error()) } if c.IsSet("trace-output") { tmpTraceFile, err := ioutil.TempFile("", "trace") if err != nil { - generalLogger.Errorf("Failed to create new temporary file to save trace output: %s", err) + log.Error().Msgf("Failed to create new temporary file to save trace output: %s", err) } defer func() { if err := tmpTraceFile.Close(); err != nil { - generalLogger.Errorf("Failed to close trace output file %s with error: %s", tmpTraceFile.Name(), err) + log.Error().Msgf("Failed to close trace output file %s with error: %s", tmpTraceFile.Name(), err) } if err := os.Rename(tmpTraceFile.Name(), 
c.String("trace-output")); err != nil { - generalLogger.Errorf("Failed to rename temporary trace output file %s to %s with error: %s", tmpTraceFile.Name(), c.String("trace-output"), err) + log.Error().Msgf("Failed to rename temporary trace output file %s to %s with error: %s", tmpTraceFile.Name(), c.String("trace-output"), err) } else { err := os.Remove(tmpTraceFile.Name()) if err != nil { - generalLogger.Errorf("Failed to remove the temporary trace file %s with error: %s", tmpTraceFile.Name(), err) + log.Error().Msgf("Failed to remove the temporary trace file %s with error: %s", tmpTraceFile.Name(), err) } } }() if err := trace.Start(tmpTraceFile); err != nil { - generalLogger.Errorf("Failed to start trace: %s", err) + log.Error().Msgf("Failed to start trace: %s", err) return errors.Wrap(err, "Error starting tracing") } defer trace.Stop() } buildInfo := buildinfo.GetBuildInfo(version) - buildInfo.Log(generalLogger) - logClientOptions(c, generalLogger) + buildInfo.Log(log) + logClientOptions(c, log) if c.IsSet("proxy-dns") { wg.Add(1) go func() { defer wg.Done() - errC <- runDNSProxyServer(c, dnsReadySignal, shutdownC, generalLogger) + errC <- runDNSProxyServer(c, dnsReadySignal, shutdownC, log) }() } else { close(dnsReadySignal) @@ -273,12 +272,12 @@ func StartServer( go notifySystemd(connectedSignal) if c.IsSet("pidfile") { - go writePidFile(connectedSignal, c.String("pidfile"), generalLogger) + go writePidFile(connectedSignal, c.String("pidfile"), log) } cloudflaredID, err := uuid.NewRandom() if err != nil { - generalLogger.Errorf("Cannot generate cloudflared ID: %s", err) + log.Error().Msgf("Cannot generate cloudflared ID: %s", err) return err } @@ -289,12 +288,12 @@ func StartServer( }() // update needs to be after DNS proxy is up to resolve equinox server address - if updater.IsAutoupdateEnabled(c, generalLogger) { - generalLogger.Infof("Autoupdate frequency is set to %v", c.Duration("autoupdate-freq")) + if updater.IsAutoupdateEnabled(c, log) { + 
log.Info().Msgf("Autoupdate frequency is set to %v", c.Duration("autoupdate-freq")) wg.Add(1) go func() { defer wg.Done() - autoupdater := updater.NewAutoUpdater(c.Duration("autoupdate-freq"), &listeners, generalLogger) + autoupdater := updater.NewAutoUpdater(c.Duration("autoupdate-freq"), &listeners, log) errC <- autoupdater.Run(ctx) }() } @@ -303,21 +302,18 @@ func StartServer( if dnsProxyStandAlone(c) { connectedSignal.Notify() // no grace period, handle SIGINT/SIGTERM immediately - return waitToShutdown(&wg, errC, shutdownC, graceShutdownC, 0, generalLogger) + return waitToShutdown(&wg, errC, shutdownC, graceShutdownC, 0, log) } url := c.String("url") hostname := c.String("hostname") if url == hostname && url != "" && hostname != "" { errText := "hostname and url shouldn't match. See --help for more information" - generalLogger.Error(errText) + log.Error().Msg(errText) return fmt.Errorf(errText) } - transportLogger, err := logger.CreateTransportLoggerFromContext(c, isUIEnabled) - if err != nil { - return errors.Wrap(err, "error setting up transport logger") - } + transportLog := logger.CreateTransportLoggerFromContext(c, isUIEnabled) readinessCh := make(chan connection.Event, 16) uiCh := make(chan connection.Event, 16) @@ -325,30 +321,30 @@ func StartServer( readinessCh, uiCh, } - tunnelConfig, ingressRules, err := prepareTunnelConfig(c, buildInfo, version, generalLogger, transportLogger, namedTunnel, isUIEnabled, eventChannels) + tunnelConfig, ingressRules, err := prepareTunnelConfig(c, buildInfo, version, log, transportLog, namedTunnel, isUIEnabled, eventChannels) if err != nil { - generalLogger.Errorf("Couldn't start tunnel: %v", err) + log.Error().Msgf("Couldn't start tunnel: %v", err) return err } metricsListener, err := listeners.Listen("tcp", c.String("metrics")) if err != nil { - generalLogger.Errorf("Error opening metrics server listener: %s", err) + log.Error().Msgf("Error opening metrics server listener: %s", err) return errors.Wrap(err, "Error 
opening metrics server listener") } defer metricsListener.Close() wg.Add(1) go func() { defer wg.Done() - errC <- metrics.ServeMetrics(metricsListener, shutdownC, readinessCh, generalLogger) + errC <- metrics.ServeMetrics(metricsListener, shutdownC, readinessCh, log) }() - ingressRules.StartOrigins(&wg, generalLogger, shutdownC, errC) + ingressRules.StartOrigins(&wg, log, shutdownC, errC) reconnectCh := make(chan origin.ReconnectSignal, 1) if c.IsSet("stdin-control") { - generalLogger.Info("Enabling control through stdin") - go stdinControl(reconnectCh, generalLogger) + log.Info().Msg("Enabling control through stdin") + go stdinControl(reconnectCh, log) } wg.Add(1) @@ -365,31 +361,15 @@ func StartServer( &ingressRules, tunnelConfig.HAConnections, ) - logLevels, err := logger.ParseLevelString(c.String("loglevel")) - if err != nil { - return err - } - tunnelInfo.LaunchUI(ctx, generalLogger, transportLogger, logLevels, uiCh) + tunnelInfo.LaunchUI(ctx, log, transportLog, uiCh) } - return waitToShutdown(&wg, errC, shutdownC, graceShutdownC, c.Duration("grace-period"), generalLogger) -} - -// forceSetFlag attempts to set the given flag value in the closest context that has it defined -func forceSetFlag(c *cli.Context, name, value string) { - for _, ctx := range c.Lineage() { - if err := ctx.Set(name, value); err == nil { - break - } - } + return waitToShutdown(&wg, errC, shutdownC, graceShutdownC, c.Duration("grace-period"), log) } func SetFlagsFromConfigFile(c *cli.Context) error { const exitCode = 1 - log, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) - if err != nil { - return cliutil.PrintLoggerSetupError("error setting up logger", err) - } + log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) inputSource, err := config.ReadConfigFile(c, log) if err != nil { if err == config.ErrNoConfigFile { @@ -411,20 +391,20 @@ func waitToShutdown(wg *sync.WaitGroup, errC chan error, shutdownC, graceShutdownC chan struct{}, gracePeriod 
time.Duration, - logger logger.Service, + log *zerolog.Logger, ) error { var err error if gracePeriod > 0 { - err = waitForSignalWithGraceShutdown(errC, shutdownC, graceShutdownC, gracePeriod, logger) + err = waitForSignalWithGraceShutdown(errC, shutdownC, graceShutdownC, gracePeriod, log) } else { - err = waitForSignal(errC, shutdownC, logger) + err = waitForSignal(errC, shutdownC, log) close(graceShutdownC) } if err != nil { - logger.Errorf("Quitting due to error: %s", err) + log.Error().Msgf("Quitting due to error: %s", err) } else { - logger.Info("Quitting...") + log.Info().Msg("Quitting...") } // Wait for clean exit, discarding all errors go func() { @@ -440,16 +420,16 @@ func notifySystemd(waitForSignal *signal.Signal) { daemon.SdNotify(false, "READY=1") } -func writePidFile(waitForSignal *signal.Signal, pidFile string, logger logger.Service) { +func writePidFile(waitForSignal *signal.Signal, pidFile string, log *zerolog.Logger) { <-waitForSignal.Wait() expandedPath, err := homedir.Expand(pidFile) if err != nil { - logger.Errorf("Unable to expand %s, try to use absolute path in --pidfile: %s", pidFile, err) + log.Error().Msgf("Unable to expand %s, try to use absolute path in --pidfile: %s", pidFile, err) return } file, err := os.Create(expandedPath) if err != nil { - logger.Errorf("Unable to write pid to %s: %s", expandedPath, err) + log.Error().Msgf("Unable to write pid to %s: %s", expandedPath, err) return } defer file.Close() @@ -1018,7 +998,7 @@ func configureProxyDNSFlags(shouldHide bool) []cli.Flag { } } -func stdinControl(reconnectCh chan origin.ReconnectSignal, logger logger.Service) { +func stdinControl(reconnectCh chan origin.ReconnectSignal, log *zerolog.Logger) { for { scanner := bufio.NewScanner(os.Stdin) for scanner.Scan() { @@ -1033,17 +1013,17 @@ func stdinControl(reconnectCh chan origin.ReconnectSignal, logger logger.Service if len(parts) > 1 { var err error if reconnect.Delay, err = time.ParseDuration(parts[1]); err != nil { - 
logger.Error(err.Error()) + log.Error().Msg(err.Error()) continue } } - logger.Infof("Sending reconnect signal %+v", reconnect) + log.Info().Msgf("Sending reconnect signal %+v", reconnect) reconnectCh <- reconnect default: - logger.Infof("Unknown command: %s", command) + log.Info().Msgf("Unknown command: %s", command) fallthrough case "help": - logger.Info(`Supported command: + log.Info().Msg(`Supported command: reconnect [delay] - restarts one randomly chosen connection with optional delay before reconnect`) } diff --git a/cmd/cloudflared/tunnel/configuration.go b/cmd/cloudflared/tunnel/configuration.go index df81ce85..63833b53 100644 --- a/cmd/cloudflared/tunnel/configuration.go +++ b/cmd/cloudflared/tunnel/configuration.go @@ -14,7 +14,6 @@ import ( "github.com/cloudflare/cloudflared/edgediscovery" "github.com/cloudflare/cloudflared/h2mux" "github.com/cloudflare/cloudflared/ingress" - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/origin" "github.com/cloudflare/cloudflared/tlsconfig" tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" @@ -23,6 +22,7 @@ import ( "github.com/google/uuid" "github.com/mitchellh/go-homedir" "github.com/pkg/errors" + "github.com/rs/zerolog" "github.com/urfave/cli/v2" "golang.org/x/crypto/ssh/terminal" ) @@ -46,16 +46,16 @@ func findDefaultOriginCertPath() string { return "" } -func generateRandomClientID(logger logger.Service) (string, error) { +func generateRandomClientID(log *zerolog.Logger) (string, error) { u, err := uuid.NewRandom() if err != nil { - logger.Errorf("couldn't create UUID for client ID %s", err) + log.Error().Msgf("couldn't create UUID for client ID %s", err) return "", err } return u.String(), nil } -func logClientOptions(c *cli.Context, logger logger.Service) { +func logClientOptions(c *cli.Context, log *zerolog.Logger) { flags := make(map[string]interface{}) for _, flag := range c.LocalFlagNames() { flags[flag] = c.Generic(flag) @@ -69,7 +69,7 @@ func logClientOptions(c 
*cli.Context, logger logger.Service) { } if len(flags) > 0 { - logger.Infof("Environment variables %v", flags) + log.Info().Msgf("Environment variables %v", flags) } envs := make(map[string]string) @@ -84,7 +84,7 @@ func logClientOptions(c *cli.Context, logger logger.Service) { } } if len(envs) > 0 { - logger.Infof("Environmental variables %v", envs) + log.Info().Msgf("Environmental variables %v", envs) } } @@ -92,32 +92,32 @@ func dnsProxyStandAlone(c *cli.Context) bool { return c.IsSet("proxy-dns") && (!c.IsSet("hostname") && !c.IsSet("tag") && !c.IsSet("hello-world")) } -func findOriginCert(c *cli.Context, logger logger.Service) (string, error) { +func findOriginCert(c *cli.Context, log *zerolog.Logger) (string, error) { originCertPath := c.String("origincert") if originCertPath == "" { - logger.Infof("Cannot determine default origin certificate path. No file %s in %v", config.DefaultCredentialFile, config.DefaultConfigSearchDirectories()) + log.Info().Msgf("Cannot determine default origin certificate path. No file %s in %v", config.DefaultCredentialFile, config.DefaultConfigSearchDirectories()) if isRunningFromTerminal() { - logger.Errorf("You need to specify the origin certificate path with --origincert option, or set TUNNEL_ORIGIN_CERT environment variable. See %s for more information.", argumentsUrl) + log.Error().Msgf("You need to specify the origin certificate path with --origincert option, or set TUNNEL_ORIGIN_CERT environment variable. See %s for more information.", argumentsUrl) return "", fmt.Errorf("Client didn't specify origincert path when running from terminal") } else { - logger.Errorf("You need to specify the origin certificate path by specifying the origincert option in the configuration file, or set TUNNEL_ORIGIN_CERT environment variable. 
See %s for more information.", serviceUrl) + log.Error().Msgf("You need to specify the origin certificate path by specifying the origincert option in the configuration file, or set TUNNEL_ORIGIN_CERT environment variable. See %s for more information.", serviceUrl) return "", fmt.Errorf("Client didn't specify origincert path") } } var err error originCertPath, err = homedir.Expand(originCertPath) if err != nil { - logger.Errorf("Cannot resolve path %s: %s", originCertPath, err) + log.Error().Msgf("Cannot resolve path %s: %s", originCertPath, err) return "", fmt.Errorf("Cannot resolve path %s", originCertPath) } // Check that the user has acquired a certificate using the login command ok, err := config.FileExists(originCertPath) if err != nil { - logger.Errorf("Cannot check if origin cert exists at path %s", originCertPath) + log.Error().Msgf("Cannot check if origin cert exists at path %s", originCertPath) return "", fmt.Errorf("Cannot check if origin cert exists at path %s", originCertPath) } if !ok { - logger.Errorf(`Cannot find a valid certificate for your origin at the path: + log.Error().Msgf(`Cannot find a valid certificate for your origin at the path: %s @@ -132,23 +132,23 @@ If you don't have a certificate signed by Cloudflare, run the command: return originCertPath, nil } -func readOriginCert(originCertPath string, logger logger.Service) ([]byte, error) { - logger.Debugf("Reading origin cert from %s", originCertPath) +func readOriginCert(originCertPath string, log *zerolog.Logger) ([]byte, error) { + log.Debug().Msgf("Reading origin cert from %s", originCertPath) // Easier to send the certificate as []byte via RPC than decoding it at this point originCert, err := ioutil.ReadFile(originCertPath) if err != nil { - logger.Errorf("Cannot read %s to load origin certificate: %s", originCertPath, err) + log.Error().Msgf("Cannot read %s to load origin certificate: %s", originCertPath, err) return nil, fmt.Errorf("Cannot read %s to load origin certificate", 
originCertPath) } return originCert, nil } -func getOriginCert(c *cli.Context, logger logger.Service) ([]byte, error) { - if originCertPath, err := findOriginCert(c, logger); err != nil { +func getOriginCert(c *cli.Context, log *zerolog.Logger) ([]byte, error) { + if originCertPath, err := findOriginCert(c, log); err != nil { return nil, err } else { - return readOriginCert(originCertPath, logger) + return readOriginCert(originCertPath, log) } } @@ -156,8 +156,8 @@ func prepareTunnelConfig( c *cli.Context, buildInfo *buildinfo.BuildInfo, version string, - logger logger.Service, - transportLogger logger.Service, + log *zerolog.Logger, + transportLogger *zerolog.Logger, namedTunnel *connection.NamedTunnelConfig, isUIEnabled bool, eventChans []chan connection.Event, @@ -166,13 +166,13 @@ func prepareTunnelConfig( hostname, err := validation.ValidateHostname(c.String("hostname")) if err != nil { - logger.Errorf("Invalid hostname: %s", err) + log.Error().Msgf("Invalid hostname: %s", err) return nil, ingress.Ingress{}, errors.Wrap(err, "Invalid hostname") } isFreeTunnel := hostname == "" clientID := c.String("id") if !c.IsSet("id") { - clientID, err = generateRandomClientID(logger) + clientID, err = generateRandomClientID(log) if err != nil { return nil, ingress.Ingress{}, err } @@ -180,7 +180,7 @@ func prepareTunnelConfig( tags, err := NewTagSliceFromCLI(c.StringSlice("tag")) if err != nil { - logger.Errorf("Tag parse failure: %s", err) + log.Error().Msgf("Tag parse failure: %s", err) return nil, ingress.Ingress{}, errors.Wrap(err, "Tag parse failure") } @@ -188,7 +188,7 @@ func prepareTunnelConfig( var originCert []byte if !isFreeTunnel { - originCert, err = getOriginCert(c, logger) + originCert, err = getOriginCert(c, log) if err != nil { return nil, ingress.Ingress{}, errors.Wrap(err, "Error getting origin cert") } @@ -227,17 +227,17 @@ func prepareTunnelConfig( // Convert single-origin configuration into multi-origin configuration. 
if ingressRules.IsEmpty() { - ingressRules, err = ingress.NewSingleOrigin(c, !isNamedTunnel, logger) + ingressRules, err = ingress.NewSingleOrigin(c, !isNamedTunnel) if err != nil { return nil, ingress.Ingress{}, err } } - protocolSelector, err := connection.NewProtocolSelector(c.String("protocol"), namedTunnel, edgediscovery.HTTP2Percentage, origin.ResolveTTL, logger) + protocolSelector, err := connection.NewProtocolSelector(c.String("protocol"), namedTunnel, edgediscovery.HTTP2Percentage, origin.ResolveTTL, log) if err != nil { return nil, ingress.Ingress{}, err } - logger.Infof("Initial protocol %s", protocolSelector.Current()) + log.Info().Msgf("Initial protocol %s", protocolSelector.Current()) edgeTLSConfigs := make(map[connection.Protocol]*tls.Config, len(connection.ProtocolList)) for _, p := range connection.ProtocolList { @@ -248,7 +248,7 @@ func prepareTunnelConfig( edgeTLSConfigs[p] = edgeTLSConfig } - originClient := origin.NewClient(ingressRules, tags, logger) + originClient := origin.NewClient(ingressRules, tags, log) connectionConfig := &connection.Config{ OriginClient: originClient, GracePeriod: c.Duration("grace-period"), @@ -272,7 +272,7 @@ func prepareTunnelConfig( IsFreeTunnel: isFreeTunnel, LBPool: c.String("lb-pool"), Tags: tags, - Logger: logger, + Log: log, Observer: connection.NewObserver(transportLogger, eventChans, isUIEnabled), ReportedVersion: version, Retries: c.Uint("retries"), diff --git a/cmd/cloudflared/tunnel/credential_finder.go b/cmd/cloudflared/tunnel/credential_finder.go index c877e5ba..d9ae5fa1 100644 --- a/cmd/cloudflared/tunnel/credential_finder.go +++ b/cmd/cloudflared/tunnel/credential_finder.go @@ -5,8 +5,9 @@ import ( "path/filepath" "github.com/cloudflare/cloudflared/cmd/cloudflared/config" - "github.com/cloudflare/cloudflared/logger" + "github.com/google/uuid" + "github.com/rs/zerolog" "github.com/urfave/cli/v2" ) @@ -39,25 +40,25 @@ func (a staticPath) Path() (string, error) { // Implements CredFinder and looks for 
the credentials file in several directories // searching for a file named .json type searchByID struct { - id uuid.UUID - c *cli.Context - logger logger.Service - fs fileSystem + id uuid.UUID + c *cli.Context + log *zerolog.Logger + fs fileSystem } -func newSearchByID(id uuid.UUID, c *cli.Context, logger logger.Service, fs fileSystem) CredFinder { +func newSearchByID(id uuid.UUID, c *cli.Context, log *zerolog.Logger, fs fileSystem) CredFinder { return searchByID{ - id: id, - c: c, - logger: logger, - fs: fs, + id: id, + c: c, + log: log, + fs: fs, } } func (s searchByID) Path() (string, error) { // Fallback to look for tunnel credentials in the origin cert directory - if originCertPath, err := findOriginCert(s.c, s.logger); err == nil { + if originCertPath, err := findOriginCert(s.c, s.log); err == nil { originCertDir := filepath.Dir(originCertPath) if filePath, err := tunnelFilePath(s.id, originCertDir); err == nil { if s.fs.validFilePath(filePath) { diff --git a/cmd/cloudflared/tunnel/ingress_subcommands.go b/cmd/cloudflared/tunnel/ingress_subcommands.go index 0a818812..5d820081 100644 --- a/cmd/cloudflared/tunnel/ingress_subcommands.go +++ b/cmd/cloudflared/tunnel/ingress_subcommands.go @@ -4,12 +4,12 @@ import ( "fmt" "net/url" - "github.com/pkg/errors" - "github.com/urfave/cli/v2" - "github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil" "github.com/cloudflare/cloudflared/cmd/cloudflared/config" "github.com/cloudflare/cloudflared/ingress" + + "github.com/pkg/errors" + "github.com/urfave/cli/v2" ) func buildIngressSubcommand() *cli.Command { diff --git a/cmd/cloudflared/tunnel/login.go b/cmd/cloudflared/tunnel/login.go index 99c6d15d..8d357798 100644 --- a/cmd/cloudflared/tunnel/login.go +++ b/cmd/cloudflared/tunnel/login.go @@ -8,9 +8,9 @@ import ( "path/filepath" "syscall" - homedir "github.com/mitchellh/go-homedir" + "github.com/mitchellh/go-homedir" "github.com/pkg/errors" - cli "github.com/urfave/cli/v2" + "github.com/urfave/cli/v2" 
"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil" "github.com/cloudflare/cloudflared/cmd/cloudflared/config" @@ -40,10 +40,7 @@ func buildLoginSubcommand(hidden bool) *cli.Command { } func login(c *cli.Context) error { - logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) - if err != nil { - return errors.Wrap(err, "error setting up logger") - } + log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) path, ok, err := checkForExistingCert() if ok { @@ -59,7 +56,15 @@ func login(c *cli.Context) error { return err } - resourceData, err := transfer.Run(loginURL, "cert", "callback", callbackStoreURL, false, false, logger) + resourceData, err := transfer.Run( + loginURL, + "cert", + "callback", + callbackStoreURL, + false, + false, + log, + ) if err != nil { fmt.Fprintf(os.Stderr, "Failed to write the certificate due to the following error:\n%v\n\nYour browser will download the certificate instead. You will have to manually\ncopy it to the following path:\n\n%s\n", err, path) return err diff --git a/cmd/cloudflared/tunnel/server.go b/cmd/cloudflared/tunnel/server.go index 832f5a97..b1f36222 100644 --- a/cmd/cloudflared/tunnel/server.go +++ b/cmd/cloudflared/tunnel/server.go @@ -1,20 +1,19 @@ package tunnel import ( - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/tunneldns" - "github.com/urfave/cli/v2" - "github.com/pkg/errors" + "github.com/rs/zerolog" + "github.com/urfave/cli/v2" ) -func runDNSProxyServer(c *cli.Context, dnsReadySignal, shutdownC chan struct{}, logger logger.Service) error { +func runDNSProxyServer(c *cli.Context, dnsReadySignal, shutdownC chan struct{}, log *zerolog.Logger) error { port := c.Int("proxy-dns-port") if port <= 0 || port > 65535 { return errors.New("The 'proxy-dns-port' must be a valid port number in <1, 65535> range.") } - listener, err := tunneldns.CreateListener(c.String("proxy-dns-address"), uint16(port), c.StringSlice("proxy-dns-upstream"), 
c.StringSlice("proxy-dns-bootstrap"), logger) + listener, err := tunneldns.CreateListener(c.String("proxy-dns-address"), uint16(port), c.StringSlice("proxy-dns-upstream"), c.StringSlice("proxy-dns-bootstrap"), log) if err != nil { close(dnsReadySignal) listener.Stop() @@ -26,6 +25,6 @@ func runDNSProxyServer(c *cli.Context, dnsReadySignal, shutdownC chan struct{}, return errors.Wrap(err, "Cannot start the DNS over HTTPS proxy server") } <-shutdownC - listener.Stop() + _ = listener.Stop() return nil } diff --git a/cmd/cloudflared/tunnel/signal.go b/cmd/cloudflared/tunnel/signal.go index 41a92d69..671c305f 100644 --- a/cmd/cloudflared/tunnel/signal.go +++ b/cmd/cloudflared/tunnel/signal.go @@ -6,24 +6,24 @@ import ( "syscall" "time" - "github.com/cloudflare/cloudflared/logger" + "github.com/rs/zerolog" ) // waitForSignal notifies all routines to shutdownC immediately by closing the // shutdownC when one of the routines in main exits, or when this process receives // SIGTERM/SIGINT -func waitForSignal(errC chan error, shutdownC chan struct{}, logger logger.Service) error { +func waitForSignal(errC chan error, shutdownC chan struct{}, log *zerolog.Logger) error { signals := make(chan os.Signal, 10) signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT) defer signal.Stop(signals) select { case err := <-errC: - logger.Infof("terminating due to error: %v", err) + log.Info().Msgf("terminating due to error: %v", err) close(shutdownC) return err case s := <-signals: - logger.Infof("terminating due to signal %s", s) + log.Info().Msgf("terminating due to signal %s", s) close(shutdownC) case <-shutdownC: } @@ -41,7 +41,7 @@ func waitForSignal(errC chan error, shutdownC chan struct{}, logger logger.Servi func waitForSignalWithGraceShutdown(errC chan error, shutdownC, graceShutdownC chan struct{}, gracePeriod time.Duration, - logger logger.Service, + logger *zerolog.Logger, ) error { signals := make(chan os.Signal, 10) signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT) @@ 
-49,16 +49,16 @@ func waitForSignalWithGraceShutdown(errC chan error, select { case err := <-errC: - logger.Infof("Initiating graceful shutdown due to %v ...", err) + logger.Info().Msgf("Initiating graceful shutdown due to %v ...", err) close(graceShutdownC) close(shutdownC) return err case s := <-signals: - logger.Infof("Initiating graceful shutdown due to signal %s ...", s) + logger.Info().Msgf("Initiating graceful shutdown due to signal %s ...", s) close(graceShutdownC) - waitForGracePeriod(signals, errC, shutdownC, gracePeriod, logger) + waitForGracePeriod(signals, errC, shutdownC, gracePeriod) case <-graceShutdownC: - waitForGracePeriod(signals, errC, shutdownC, gracePeriod, logger) + waitForGracePeriod(signals, errC, shutdownC, gracePeriod) case <-shutdownC: close(graceShutdownC) } @@ -70,7 +70,6 @@ func waitForGracePeriod(signals chan os.Signal, errC chan error, shutdownC chan struct{}, gracePeriod time.Duration, - logger logger.Service, ) { // Unregister signal handler early, so the client can send a second SIGTERM/SIGINT // to force shutdown cloudflared diff --git a/cmd/cloudflared/tunnel/signal_test.go b/cmd/cloudflared/tunnel/signal_test.go index 0f5e735f..f3166eee 100644 --- a/cmd/cloudflared/tunnel/signal_test.go +++ b/cmd/cloudflared/tunnel/signal_test.go @@ -2,11 +2,11 @@ package tunnel import ( "fmt" + "github.com/rs/zerolog" "syscall" "testing" "time" - "github.com/cloudflare/cloudflared/logger" "github.com/stretchr/testify/assert" ) @@ -28,7 +28,7 @@ func testChannelClosed(t *testing.T, c chan struct{}) { } func TestWaitForSignal(t *testing.T) { - logger := logger.NewOutputWriter(logger.NewMockWriteManager()) + log := zerolog.Nop() // Test handling server error errC := make(chan error) @@ -39,7 +39,7 @@ func TestWaitForSignal(t *testing.T) { }() // received error, shutdownC should be closed - err := waitForSignal(errC, shutdownC, logger) + err := waitForSignal(errC, shutdownC, &log) assert.Equal(t, serverErr, err) testChannelClosed(t, shutdownC) 
@@ -56,10 +56,10 @@ func TestWaitForSignal(t *testing.T) { go func(sig syscall.Signal) { // sleep for a tick to prevent sending signal before calling waitForSignal time.Sleep(tick) - syscall.Kill(syscall.Getpid(), sig) + _ = syscall.Kill(syscall.Getpid(), sig) }(sig) - err = waitForSignal(errC, shutdownC, logger) + err = waitForSignal(errC, shutdownC, &log) assert.Equal(t, nil, err) assert.Equal(t, shutdownErr, <-errC) testChannelClosed(t, shutdownC) @@ -76,10 +76,10 @@ func TestWaitForSignalWithGraceShutdown(t *testing.T) { errC <- serverErr }() - logger := logger.NewOutputWriter(logger.NewMockWriteManager()) + log := zerolog.Nop() // received error, both shutdownC and graceshutdownC should be closed - err := waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, logger) + err := waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, &log) assert.Equal(t, serverErr, err) testChannelClosed(t, shutdownC) testChannelClosed(t, graceshutdownC) @@ -89,7 +89,7 @@ func TestWaitForSignalWithGraceShutdown(t *testing.T) { shutdownC = make(chan struct{}) graceshutdownC = make(chan struct{}) close(shutdownC) - err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, logger) + err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, &log) assert.NoError(t, err) testChannelClosed(t, shutdownC) testChannelClosed(t, graceshutdownC) @@ -99,7 +99,7 @@ func TestWaitForSignalWithGraceShutdown(t *testing.T) { shutdownC = make(chan struct{}) graceshutdownC = make(chan struct{}) close(graceshutdownC) - err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, logger) + err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, &log) assert.NoError(t, err) testChannelClosed(t, shutdownC) testChannelClosed(t, graceshutdownC) @@ -119,10 +119,10 @@ func TestWaitForSignalWithGraceShutdown(t *testing.T) { go func(sig syscall.Signal) { // sleep for a tick to prevent sending signal before 
calling waitForSignalWithGraceShutdown time.Sleep(tick) - syscall.Kill(syscall.Getpid(), sig) + _ = syscall.Kill(syscall.Getpid(), sig) }(sig) - err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, logger) + err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, &log) assert.Equal(t, nil, err) assert.Equal(t, graceShutdownErr, <-errC) testChannelClosed(t, shutdownC) @@ -145,10 +145,10 @@ func TestWaitForSignalWithGraceShutdown(t *testing.T) { go func(sig syscall.Signal) { // sleep for a tick to prevent sending signal before calling waitForSignalWithGraceShutdown time.Sleep(tick) - syscall.Kill(syscall.Getpid(), sig) + _ = syscall.Kill(syscall.Getpid(), sig) }(sig) - err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, logger) + err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, &log) assert.Equal(t, nil, err) assert.Equal(t, shutdownErr, <-errC) testChannelClosed(t, shutdownC) diff --git a/cmd/cloudflared/tunnel/subcommand_context.go b/cmd/cloudflared/tunnel/subcommand_context.go index 3e2d572a..09e4efeb 100644 --- a/cmd/cloudflared/tunnel/subcommand_context.go +++ b/cmd/cloudflared/tunnel/subcommand_context.go @@ -8,6 +8,7 @@ import ( "github.com/google/uuid" "github.com/pkg/errors" + "github.com/rs/zerolog" "github.com/urfave/cli/v2" "github.com/cloudflare/cloudflared/certutil" @@ -29,7 +30,7 @@ func (e errInvalidJSONCredential) Error() string { // pass between subcommands, and make sure they are only initialized once type subcommandContext struct { c *cli.Context - logger logger.Service + log *zerolog.Logger isUIEnabled bool fs fileSystem @@ -42,14 +43,11 @@ func newSubcommandContext(c *cli.Context) (*subcommandContext, error) { isUIEnabled := c.IsSet(uiFlag) && c.String("name") != "" // If UI is enabled, terminal log output should be disabled -- log should be written into a UI log window instead - logger, err := logger.CreateLoggerFromContext(c, isUIEnabled) - if err != 
nil { - return nil, errors.Wrap(err, "error setting up logger") - } + log := logger.CreateLoggerFromContext(c, isUIEnabled) return &subcommandContext{ c: c, - logger: logger, + log: log, isUIEnabled: isUIEnabled, fs: realFileSystem{}, }, nil @@ -60,7 +58,7 @@ func (sc *subcommandContext) credentialFinder(tunnelID uuid.UUID) CredFinder { if path := sc.c.String(CredFileFlag); path != "" { return newStaticPath(path, sc.fs) } - return newSearchByID(tunnelID, sc.c, sc.logger, sc.fs) + return newSearchByID(tunnelID, sc.c, sc.log, sc.fs) } type userCredential struct { @@ -77,7 +75,15 @@ func (sc *subcommandContext) client() (tunnelstore.Client, error) { return nil, err } userAgent := fmt.Sprintf("cloudflared/%s", version) - client, err := tunnelstore.NewRESTClient(sc.c.String("api-url"), credential.cert.AccountID, credential.cert.ZoneID, credential.cert.ServiceKey, userAgent, sc.logger) + client, err := tunnelstore.NewRESTClient( + sc.c.String("api-url"), + credential.cert.AccountID, + credential.cert.ZoneID, + credential.cert.ServiceKey, + userAgent, + sc.log, + ) + if err != nil { return nil, err } @@ -87,11 +93,11 @@ func (sc *subcommandContext) client() (tunnelstore.Client, error) { func (sc *subcommandContext) credential() (*userCredential, error) { if sc.userCredential == nil { - originCertPath, err := findOriginCert(sc.c, sc.logger) + originCertPath, err := findOriginCert(sc.c, sc.log) if err != nil { return nil, errors.Wrap(err, "Error locating origin cert") } - blocks, err := readOriginCert(originCertPath, sc.logger) + blocks, err := readOriginCert(originCertPath, sc.log) if err != nil { return nil, errors.Wrapf(err, "Can't read origin cert from %s", originCertPath) } @@ -163,7 +169,7 @@ func (sc *subcommandContext) create(name string) (*tunnelstore.Tunnel, error) { TunnelName: name, } filePath, writeFileErr := writeTunnelCredentials(credential.certPath, &tunnelCredentials) - if err != nil { + if writeFileErr != nil { var errorLines []string errorLines = 
append(errorLines, fmt.Sprintf("Your tunnel '%v' was created with ID %v. However, cloudflared couldn't write to the tunnel credentials file at %v.json.", tunnel.Name, tunnel.ID, tunnel.ID)) errorLines = append(errorLines, fmt.Sprintf("The file-writing error is: %v", writeFileErr)) @@ -176,13 +182,13 @@ func (sc *subcommandContext) create(name string) (*tunnelstore.Tunnel, error) { errorMsg := strings.Join(errorLines, "\n") return nil, errors.New(errorMsg) } - sc.logger.Infof("Tunnel credentials written to %v. cloudflared chose this file based on where your origin certificate was found. Keep this file secret. To revoke these credentials, delete the tunnel.", filePath) + sc.log.Info().Msgf("Tunnel credentials written to %v. cloudflared chose this file based on where your origin certificate was found. Keep this file secret. To revoke these credentials, delete the tunnel.", filePath) if outputFormat := sc.c.String(outputFormatFlag.Name); outputFormat != "" { return nil, renderOutput(outputFormat, &tunnel) } - sc.logger.Infof("Created tunnel %s with id %s", tunnel.Name, tunnel.ID) + sc.log.Info().Msgf("Created tunnel %s with id %s", tunnel.Name, tunnel.ID) return tunnel, nil } @@ -230,7 +236,7 @@ func (sc *subcommandContext) delete(tunnelIDs []uuid.UUID) error { credFinder := sc.credentialFinder(id) if tunnelCredentialsPath, err := credFinder.Path(); err == nil { if err = os.Remove(tunnelCredentialsPath); err != nil { - sc.logger.Infof("Tunnel %v was deleted, but we could not remove its credentials file %s: %s. Consider deleting this file manually.", id, tunnelCredentialsPath, err) + sc.log.Info().Msgf("Tunnel %v was deleted, but we could not remove its credentials file %s: %s. 
Consider deleting this file manually.", id, tunnelCredentialsPath, err) } } } @@ -254,18 +260,19 @@ func (sc *subcommandContext) run(tunnelID uuid.UUID) error { credentials, err := sc.findCredentials(tunnelID) if err != nil { if e, ok := err.(errInvalidJSONCredential); ok { - sc.logger.Errorf("The credentials file at %s contained invalid JSON. This is probably caused by passing the wrong filepath. Reminder: the credentials file is a .json file created via `cloudflared tunnel create`.", e.path) - sc.logger.Errorf("Invalid JSON when parsing credentials file: %s", e.err.Error()) + sc.log.Error().Msgf("The credentials file at %s contained invalid JSON. This is probably caused by passing the wrong filepath. Reminder: the credentials file is a .json file created via `cloudflared tunnel create`.", e.path) + sc.log.Error().Msgf("Invalid JSON when parsing credentials file: %s", e.err.Error()) } return err } + return StartServer( sc.c, version, shutdownC, graceShutdownC, &connection.NamedTunnelConfig{Credentials: credentials}, - sc.logger, + sc.log, sc.isUIEnabled, ) } @@ -276,9 +283,9 @@ func (sc *subcommandContext) cleanupConnections(tunnelIDs []uuid.UUID) error { return err } for _, tunnelID := range tunnelIDs { - sc.logger.Infof("Cleanup connection for tunnel %s", tunnelID) + sc.log.Info().Msgf("Cleanup connection for tunnel %s", tunnelID) if err := client.CleanupConnections(tunnelID); err != nil { - sc.logger.Errorf("Error cleaning up connections for tunnel %v, error :%v", tunnelID, err) + sc.log.Error().Msgf("Error cleaning up connections for tunnel %v, error :%v", tunnelID, err) } } return nil diff --git a/cmd/cloudflared/tunnel/subcommand_context_test.go b/cmd/cloudflared/tunnel/subcommand_context_test.go index 6577b8e0..11b714be 100644 --- a/cmd/cloudflared/tunnel/subcommand_context_test.go +++ b/cmd/cloudflared/tunnel/subcommand_context_test.go @@ -4,16 +4,15 @@ import ( "encoding/base64" "flag" "fmt" + "github.com/rs/zerolog" "reflect" "testing" "time" 
"github.com/cloudflare/cloudflared/connection" - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/tunnelstore" "github.com/google/uuid" "github.com/pkg/errors" - "github.com/stretchr/testify/require" "github.com/urfave/cli/v2" ) @@ -106,7 +105,7 @@ func (fs mockFileSystem) readFile(filePath string) ([]byte, error) { func Test_subcommandContext_findCredentials(t *testing.T) { type fields struct { c *cli.Context - logger logger.Service + log *zerolog.Logger isUIEnabled bool fs fileSystem tunnelstoreClient tunnelstore.Client @@ -137,8 +136,7 @@ func Test_subcommandContext_findCredentials(t *testing.T) { }, vfp: func(string) bool { return true }, } - logger, err := logger.New() - require.NoError(t, err) + log := zerolog.Nop() tests := []struct { name string @@ -150,13 +148,13 @@ func Test_subcommandContext_findCredentials(t *testing.T) { { name: "Filepath given leads to old credentials file", fields: fields{ - logger: logger, - fs: fs, + log: &log, + fs: fs, c: func() *cli.Context { flagSet := flag.NewFlagSet("test0", flag.PanicOnError) flagSet.String(CredFileFlag, oldCertPath, "") c := cli.NewContext(cli.NewApp(), flagSet, nil) - err = c.Set(CredFileFlag, oldCertPath) + _ = c.Set(CredFileFlag, oldCertPath) return c }(), }, @@ -172,13 +170,13 @@ func Test_subcommandContext_findCredentials(t *testing.T) { { name: "Filepath given leads to new credentials file", fields: fields{ - logger: logger, - fs: fs, + log: &log, + fs: fs, c: func() *cli.Context { flagSet := flag.NewFlagSet("test0", flag.PanicOnError) flagSet.String(CredFileFlag, newCertPath, "") c := cli.NewContext(cli.NewApp(), flagSet, nil) - err = c.Set(CredFileFlag, newCertPath) + _ = c.Set(CredFileFlag, newCertPath) return c }(), }, @@ -197,7 +195,7 @@ func Test_subcommandContext_findCredentials(t *testing.T) { t.Run(tt.name, func(t *testing.T) { sc := &subcommandContext{ c: tt.fields.c, - logger: tt.fields.logger, + log: tt.fields.log, isUIEnabled: tt.fields.isUIEnabled, fs: 
tt.fields.fs, tunnelstoreClient: tt.fields.tunnelstoreClient, diff --git a/cmd/cloudflared/tunnel/subcommands.go b/cmd/cloudflared/tunnel/subcommands.go index 0d607b27..0274b5f2 100644 --- a/cmd/cloudflared/tunnel/subcommands.go +++ b/cmd/cloudflared/tunnel/subcommands.go @@ -223,7 +223,7 @@ func fmtAndPrintTunnelList(tunnels []*tunnelstore.Tunnel, showRecentlyDisconnect defer writer.Flush() // Print column headers with tabbed columns - fmt.Fprintln(writer, "ID\tNAME\tCREATED\tCONNECTIONS\t") + _, _ = fmt.Fprintln(writer, "ID\tNAME\tCREATED\tCONNECTIONS\t") // Loop through tunnels, create formatted string for each, and print using tabwriter for _, t := range tunnels { @@ -234,7 +234,7 @@ func fmtAndPrintTunnelList(tunnels []*tunnelstore.Tunnel, showRecentlyDisconnect t.CreatedAt.Format(time.RFC3339), fmtConnections(t.Connections, showRecentlyDisconnected), ) - fmt.Fprintln(writer, formattedStr) + _, _ = fmt.Fprintln(writer, formattedStr) } } @@ -360,7 +360,7 @@ func runNamedTunnel(sc *subcommandContext, tunnelRef string) error { return errors.Wrap(err, "error parsing tunnel ID") } - sc.logger.Infof("Starting tunnel %s", tunnelID.String()) + sc.log.Info().Msgf("Starting tunnel %s", tunnelID.String()) return sc.run(tunnelID) } @@ -515,7 +515,7 @@ func routeCommand(c *cli.Context) error { return err } - sc.logger.Infof(res.SuccessSummary()) + sc.log.Info().Msg(res.SuccessSummary()) return nil } diff --git a/cmd/cloudflared/ui/launch_ui.go b/cmd/cloudflared/ui/launch_ui.go index 3869559b..8c3bcf4f 100644 --- a/cmd/cloudflared/ui/launch_ui.go +++ b/cmd/cloudflared/ui/launch_ui.go @@ -4,14 +4,13 @@ import ( "context" "fmt" "strings" - "time" "github.com/cloudflare/cloudflared/connection" "github.com/cloudflare/cloudflared/ingress" - "github.com/cloudflare/cloudflared/logger" "github.com/gdamore/tcell" "github.com/rivo/tview" + "github.com/rs/zerolog" ) type connState struct { @@ -51,16 +50,16 @@ func NewUIModel(version, hostname, metricsURL string, ing *ingress.Ingress, 
haCo func (data *uiModel) LaunchUI( ctx context.Context, - generalLogger, transportLogger logger.Service, - logLevels []logger.Level, + log, transportLog *zerolog.Logger, tunnelEventChan <-chan connection.Event, ) { // Configure the logger to stream logs into the textview // Add TextView as a group to write output to logTextView := NewDynamicColorTextView() - generalLogger.Add(logTextView, logger.NewUIFormatter(time.RFC3339), logLevels...) - transportLogger.Add(logTextView, logger.NewUIFormatter(time.RFC3339), logLevels...) + // TODO: Format log for UI + //log.Add(logTextView, logger.NewUIFormatter(time.RFC3339), logLevels...) + //transportLog.Add(logTextView, logger.NewUIFormatter(time.RFC3339), logLevels...) // Construct the UI palette := palette{ @@ -125,7 +124,7 @@ func (data *uiModel) LaunchUI( case connection.Connected: data.setConnTableCell(event, connTable, palette) case connection.Disconnected, connection.Reconnecting: - data.changeConnStatus(event, connTable, generalLogger, palette) + data.changeConnStatus(event, connTable, log, palette) case connection.SetURL: tunnelHostText.SetText(event.URL) data.edgeURL = event.URL @@ -141,7 +140,7 @@ func (data *uiModel) LaunchUI( go func() { if err := app.SetRoot(frame, true).Run(); err != nil { - generalLogger.Errorf("Error launching UI: %s", err) + log.Error().Msgf("Error launching UI: %s", err) } }() } @@ -159,13 +158,13 @@ func handleNewText(app *tview.Application, logTextView *tview.TextView) func() { } } -func (data *uiModel) changeConnStatus(event connection.Event, table *tview.Table, logger logger.Service, palette palette) { +func (data *uiModel) changeConnStatus(event connection.Event, table *tview.Table, log *zerolog.Logger, palette palette) { index := int(event.Index) // Get connection location and state connState := data.getConnState(index) // Check if connection is already displayed in UI if connState == nil { - logger.Info("Connection is not in the UI table") + log.Info().Msg("Connection is not in the 
UI table") return } diff --git a/cmd/cloudflared/updater/update.go b/cmd/cloudflared/updater/update.go index 30fa7bc7..9b865b13 100644 --- a/cmd/cloudflared/updater/update.go +++ b/cmd/cloudflared/updater/update.go @@ -3,6 +3,7 @@ package updater import ( "context" "fmt" + "github.com/rs/zerolog" "os" "path/filepath" "runtime" @@ -14,12 +15,10 @@ import ( "github.com/cloudflare/cloudflared/cmd/cloudflared/config" "github.com/cloudflare/cloudflared/logger" "github.com/facebookgo/grace/gracenet" - "github.com/pkg/errors" ) const ( DefaultCheckUpdateFreq = time.Hour * 24 - appID = "app_idCzgxYerVD" noUpdateInShellMessage = "cloudflared will not automatically update when run from the shell. To enable auto-updates, run cloudflared as a service: https://developers.cloudflare.com/argo-tunnel/reference/service/" noUpdateOnWindowsMessage = "cloudflared will not automatically update on Windows systems." noUpdateManagedPackageMessage = "cloudflared will not automatically update if installed by a package manager." @@ -114,38 +113,35 @@ func checkForUpdateAndApply(options updateOptions) UpdateOutcome { // Update is the handler for the update command from the command line func Update(c *cli.Context) error { - logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) - if err != nil { - return errors.Wrap(err, "error setting up logger") - } + log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) if wasInstalledFromPackageManager() { - logger.Error("cloudflared was installed by a package manager. Please update using the same method.") + log.Error().Msg("cloudflared was installed by a package manager. 
Please update using the same method.") return nil } isBeta := c.Bool("beta") if isBeta { - logger.Info("cloudflared is set to update to the latest beta version") + log.Info().Msg("cloudflared is set to update to the latest beta version") } isStaging := c.Bool("staging") if isStaging { - logger.Info("cloudflared is set to update from staging") + log.Info().Msg("cloudflared is set to update from staging") } isForced := c.Bool("force") if isForced { - logger.Info("cloudflared is set to upgrade to the latest publish version regardless of the current version") + log.Info().Msg("cloudflared is set to upgrade to the latest publish version regardless of the current version") } - updateOutcome := loggedUpdate(logger, updateOptions{isBeta: isBeta, isStaging: isStaging, isForced: isForced, version: c.String("version")}) + updateOutcome := loggedUpdate(log, updateOptions{isBeta: isBeta, isStaging: isStaging, isForced: isForced, version: c.String("version")}) if updateOutcome.Error != nil { return &statusErr{updateOutcome.Error} } if updateOutcome.noUpdate() { - logger.Infof("cloudflared is up to date (%s)", updateOutcome.Version) + log.Info().Msgf("cloudflared is up to date (%s)", updateOutcome.Version) return nil } @@ -153,13 +149,13 @@ func Update(c *cli.Context) error { } // Checks for an update and applies it if one is available -func loggedUpdate(logger logger.Service, options updateOptions) UpdateOutcome { +func loggedUpdate(log *zerolog.Logger, options updateOptions) UpdateOutcome { updateOutcome := checkForUpdateAndApply(options) if updateOutcome.Updated { - logger.Infof("cloudflared has been updated to version %s", updateOutcome.Version) + log.Info().Msgf("cloudflared has been updated to version %s", updateOutcome.Version) } if updateOutcome.Error != nil { - logger.Errorf("update check failed: %s", updateOutcome.Error) + log.Error().Msgf("update check failed: %s", updateOutcome.Error) } return updateOutcome @@ -170,7 +166,7 @@ type AutoUpdater struct { configurable 
*configurable listeners *gracenet.Net updateConfigChan chan *configurable - logger logger.Service + log *zerolog.Logger } // AutoUpdaterConfigurable is the attributes of AutoUpdater that can be reconfigured during runtime @@ -179,7 +175,7 @@ type configurable struct { freq time.Duration } -func NewAutoUpdater(freq time.Duration, listeners *gracenet.Net, logger logger.Service) *AutoUpdater { +func NewAutoUpdater(freq time.Duration, listeners *gracenet.Net, log *zerolog.Logger) *AutoUpdater { updaterConfigurable := &configurable{ enabled: true, freq: freq, @@ -192,7 +188,7 @@ func NewAutoUpdater(freq time.Duration, listeners *gracenet.Net, logger logger.S configurable: updaterConfigurable, listeners: listeners, updateConfigChan: make(chan *configurable), - logger: logger, + log: log, } } @@ -200,19 +196,19 @@ func (a *AutoUpdater) Run(ctx context.Context) error { ticker := time.NewTicker(a.configurable.freq) for { if a.configurable.enabled { - updateOutcome := loggedUpdate(a.logger, updateOptions{}) + updateOutcome := loggedUpdate(a.log, updateOptions{}) if updateOutcome.Updated { if IsSysV() { // SysV doesn't have a mechanism to keep service alive, we have to restart the process - a.logger.Info("Restarting service managed by SysV...") + a.log.Info().Msg("Restarting service managed by SysV...") pid, err := a.listeners.StartProcess() if err != nil { - a.logger.Errorf("Unable to restart server automatically: %s", err) + a.log.Error().Msgf("Unable to restart server automatically: %s", err) return &statusErr{err: err} } // stop old process after autoupdate. 
Otherwise we create a new process // after each update - a.logger.Infof("PID of the new process is %d", pid) + a.log.Info().Msgf("PID of the new process is %d", pid) } return &statusSuccess{newVersion: updateOutcome.Version} } @@ -244,26 +240,26 @@ func (a *AutoUpdater) Update(newFreq time.Duration) { a.updateConfigChan <- newConfigurable } -func IsAutoupdateEnabled(c *cli.Context, l logger.Service) bool { - if !SupportAutoUpdate(l) { +func IsAutoupdateEnabled(c *cli.Context, log *zerolog.Logger) bool { + if !SupportAutoUpdate(log) { return false } return !c.Bool("no-autoupdate") && c.Duration("autoupdate-freq") != 0 } -func SupportAutoUpdate(logger logger.Service) bool { +func SupportAutoUpdate(log *zerolog.Logger) bool { if runtime.GOOS == "windows" { - logger.Info(noUpdateOnWindowsMessage) + log.Info().Msg(noUpdateOnWindowsMessage) return false } if wasInstalledFromPackageManager() { - logger.Info(noUpdateManagedPackageMessage) + log.Info().Msg(noUpdateManagedPackageMessage) return false } if isRunningFromTerminal() { - logger.Info(noUpdateInShellMessage) + log.Info().Msg(noUpdateInShellMessage) return false } return true diff --git a/cmd/cloudflared/updater/update_test.go b/cmd/cloudflared/updater/update_test.go index d4ad93ee..a20c4ffa 100644 --- a/cmd/cloudflared/updater/update_test.go +++ b/cmd/cloudflared/updater/update_test.go @@ -4,15 +4,15 @@ import ( "context" "testing" - "github.com/cloudflare/cloudflared/logger" "github.com/facebookgo/grace/gracenet" + "github.com/rs/zerolog" "github.com/stretchr/testify/assert" ) func TestDisabledAutoUpdater(t *testing.T) { listeners := &gracenet.Net{} - logger := logger.NewOutputWriter(logger.NewMockWriteManager()) - autoupdater := NewAutoUpdater(0, listeners, logger) + log := zerolog.Nop() + autoupdater := NewAutoUpdater(0, listeners, &log) ctx, cancel := context.WithCancel(context.Background()) errC := make(chan error) go func() { diff --git a/cmd/cloudflared/windows_service.go b/cmd/cloudflared/windows_service.go 
index 109ce696..95b6fc8a 100644 --- a/cmd/cloudflared/windows_service.go +++ b/cmd/cloudflared/windows_service.go @@ -13,9 +13,8 @@ import ( "unsafe" "github.com/cloudflare/cloudflared/logger" - "github.com/pkg/errors" - cli "github.com/urfave/cli/v2" + "github.com/urfave/cli/v2" "golang.org/x/sys/windows" "golang.org/x/sys/windows/svc" "golang.org/x/sys/windows/svc/eventlog" @@ -67,15 +66,11 @@ func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) { // 2. get ERROR_FAILED_SERVICE_CONTROLLER_CONNECT // This involves actually trying to start the service. - logger, err := logger.New() - if err != nil { - os.Exit(1) - return - } + log := logger.Create(nil) isIntSess, err := svc.IsAnInteractiveSession() if err != nil { - logger.Fatalf("failed to determine if we are running in an interactive session: %v", err) + log.Fatal().Msgf("failed to determine if we are running in an interactive session: %v", err) } if isIntSess { app.Run(os.Args) @@ -93,7 +88,7 @@ func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) { app.Run(os.Args) return } - logger.Fatalf("%s service failed: %v", windowsServiceName, err) + log.Fatal().Msgf("%s service failed: %v", windowsServiceName, err) } } @@ -105,15 +100,10 @@ type windowsService struct { // called by the package code at the start of the service func (s *windowsService) Execute(serviceArgs []string, r <-chan svc.ChangeRequest, statusChan chan<- svc.Status) (ssec bool, errno uint32) { - logger, err := logger.New() - if err != nil { - os.Exit(1) - return - } - + log := logger.Create(nil) elog, err := eventlog.Open(windowsServiceName) if err != nil { - logger.Errorf("Cannot open event log for %s with error: %s", windowsServiceName, err) + log.Error().Msgf("Cannot open event log for %s with error: %s", windowsServiceName, err) return } defer elog.Close() @@ -173,79 +163,73 @@ func (s *windowsService) Execute(serviceArgs []string, r <-chan svc.ChangeReques } func installWindowsService(c *cli.Context) error { - 
logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) - if err != nil { - return errors.Wrap(err, "error setting up logger") - } + log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) - logger.Infof("Installing Argo Tunnel Windows service") + log.Info().Msgf("Installing Argo Tunnel Windows service") exepath, err := os.Executable() if err != nil { - logger.Errorf("Cannot find path name that start the process") + log.Error().Msgf("Cannot find path name that start the process") return err } m, err := mgr.Connect() if err != nil { - logger.Errorf("Cannot establish a connection to the service control manager: %s", err) + log.Error().Msgf("Cannot establish a connection to the service control manager: %s", err) return err } defer m.Disconnect() s, err := m.OpenService(windowsServiceName) if err == nil { s.Close() - logger.Errorf("service %s already exists", windowsServiceName) + log.Error().Msgf("service %s already exists", windowsServiceName) return fmt.Errorf("service %s already exists", windowsServiceName) } config := mgr.Config{StartType: mgr.StartAutomatic, DisplayName: windowsServiceDescription} s, err = m.CreateService(windowsServiceName, exepath, config) if err != nil { - logger.Errorf("Cannot install service %s", windowsServiceName) + log.Error().Msgf("Cannot install service %s", windowsServiceName) return err } defer s.Close() - logger.Infof("Argo Tunnel agent service is installed") + log.Info().Msgf("Argo Tunnel agent service is installed") err = eventlog.InstallAsEventCreate(windowsServiceName, eventlog.Error|eventlog.Warning|eventlog.Info) if err != nil { s.Delete() - logger.Errorf("Cannot install event logger: %s", err) + log.Error().Msgf("Cannot install event logger: %s", err) return fmt.Errorf("SetupEventLogSource() failed: %s", err) } err = configRecoveryOption(s.Handle) if err != nil { - logger.Errorf("Cannot set service recovery actions: %s", err) - logger.Infof("See %s to manually configure service recovery actions", 
windowsServiceUrl) + log.Error().Msgf("Cannot set service recovery actions: %s", err) + log.Info().Msgf("See %s to manually configure service recovery actions", windowsServiceUrl) } return nil } func uninstallWindowsService(c *cli.Context) error { - logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) - if err != nil { - return errors.Wrap(err, "error setting up logger") - } + log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) - logger.Infof("Uninstalling Argo Tunnel Windows Service") + log.Info().Msgf("Uninstalling Argo Tunnel Windows Service") m, err := mgr.Connect() if err != nil { - logger.Errorf("Cannot establish a connection to the service control manager") + log.Error().Msgf("Cannot establish a connection to the service control manager") return err } defer m.Disconnect() s, err := m.OpenService(windowsServiceName) if err != nil { - logger.Errorf("service %s is not installed", windowsServiceName) + log.Error().Msgf("service %s is not installed", windowsServiceName) return fmt.Errorf("service %s is not installed", windowsServiceName) } defer s.Close() err = s.Delete() if err != nil { - logger.Errorf("Cannot delete service %s", windowsServiceName) + log.Error().Msgf("Cannot delete service %s", windowsServiceName) return err } - logger.Infof("Argo Tunnel agent service is uninstalled") + log.Info().Msgf("Argo Tunnel agent service is uninstalled") err = eventlog.Remove(windowsServiceName) if err != nil { - logger.Errorf("Cannot remove event logger") + log.Error().Msgf("Cannot remove event logger") return fmt.Errorf("RemoveEventLogSource() failed: %s", err) } return nil diff --git a/connection/connection_test.go b/connection/connection_test.go index 6c55ed7e..33ec7433 100644 --- a/connection/connection_test.go +++ b/connection/connection_test.go @@ -8,8 +8,8 @@ import ( "testing" "time" - "github.com/cloudflare/cloudflared/logger" "github.com/gobwas/ws/wsutil" + "github.com/rs/zerolog" "github.com/stretchr/testify/assert" ) @@ 
-22,14 +22,14 @@ var ( OriginClient: &mockOriginClient{}, GracePeriod: time.Millisecond * 100, } - testLogger, _ = logger.New() + log = zerolog.Nop() testOriginURL = &url.URL{ Scheme: "https", Host: "connectiontest.argotunnel.com", } testTunnelEventChan = make(chan Event) testObserver = &Observer{ - testLogger, + &log, m, []chan Event{testTunnelEventChan}, false, @@ -81,7 +81,7 @@ func wsEndpoint(w ResponseWriter, r *http.Request) error { resp := &http.Response{ StatusCode: http.StatusSwitchingProtocols, } - w.WriteRespHeaders(resp) + _ = w.WriteRespHeaders(resp) clientReader := nowriter{r.Body} go func() { for { @@ -102,8 +102,8 @@ func originRespEndpoint(w ResponseWriter, status int, data []byte) { resp := &http.Response{ StatusCode: status, } - w.WriteRespHeaders(resp) - w.Write(data) + _ = w.WriteRespHeaders(resp) + _, _ = w.Write(data) } type mockConnectedFuse struct{} diff --git a/connection/errors.go b/connection/errors.go index 521a0349..b40e4b35 100644 --- a/connection/errors.go +++ b/connection/errors.go @@ -65,11 +65,11 @@ func (e muxerShutdownError) Error() string { func isHandshakeErrRecoverable(err error, connIndex uint8, observer *Observer) bool { switch err.(type) { case edgediscovery.DialError: - observer.Errorf("Connection %d unable to dial edge: %s", connIndex, err) + observer.log.Error().Msgf("Connection %d unable to dial edge: %s", connIndex, err) case h2mux.MuxerHandshakeError: - observer.Errorf("Connection %d handshake with edge server failed: %s", connIndex, err) + observer.log.Error().Msgf("Connection %d handshake with edge server failed: %s", connIndex, err) default: - observer.Errorf("Connection %d failed: %s", connIndex, err) + observer.log.Error().Msgf("Connection %d failed: %s", connIndex, err) return false } return true diff --git a/connection/h2mux.go b/connection/h2mux.go index d3843123..66403f88 100644 --- a/connection/h2mux.go +++ b/connection/h2mux.go @@ -7,10 +7,11 @@ import ( "time" "github.com/cloudflare/cloudflared/h2mux" - 
"github.com/cloudflare/cloudflared/logger" tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" "github.com/cloudflare/cloudflared/websocket" + "github.com/pkg/errors" + "github.com/rs/zerolog" "golang.org/x/sync/errgroup" ) @@ -37,14 +38,14 @@ type MuxerConfig struct { MetricsUpdateFreq time.Duration } -func (mc *MuxerConfig) H2MuxerConfig(h h2mux.MuxedStreamHandler, logger logger.Service) *h2mux.MuxerConfig { +func (mc *MuxerConfig) H2MuxerConfig(h h2mux.MuxedStreamHandler, log *zerolog.Logger) *h2mux.MuxerConfig { return &h2mux.MuxerConfig{ Timeout: muxerTimeout, Handler: h, IsClient: true, HeartbeatInterval: mc.HeartbeatInterval, MaxHeartbeats: mc.MaxHeartbeats, - Logger: logger, + Log: log, CompressionQuality: mc.CompressionSetting, } } @@ -67,7 +68,7 @@ func NewH2muxConnection(ctx context.Context, // Establish a muxed connection with the edge // Client mux handshake with agent server - muxer, err := h2mux.Handshake(edgeConn, edgeConn, *muxerConfig.H2MuxerConfig(h, observer), h2mux.ActiveStreams) + muxer, err := h2mux.Handshake(edgeConn, edgeConn, *muxerConfig.H2MuxerConfig(h, observer.log), h2mux.ActiveStreams) if err != nil { recoverable := isHandshakeErrRecoverable(err, connIndex, observer) return nil, err, recoverable @@ -87,7 +88,7 @@ func (h *h2muxConnection) ServeNamedTunnel(ctx context.Context, namedTunnel *Nam if err != nil { return err } - rpcClient := newRegistrationRPCClient(ctx, stream, h.observer) + rpcClient := newRegistrationRPCClient(ctx, stream, h.observer.log) defer rpcClient.Close() if err = rpcClient.RegisterConnection(serveCtx, namedTunnel, connOptions, h.connIndex, h.observer); err != nil { @@ -122,7 +123,7 @@ func (h *h2muxConnection) ServeClassicTunnel(ctx context.Context, classicTunnel return nil } // log errors and proceed to RegisterTunnel - h.observer.Errorf("Couldn't reconnect connection %d. Reregistering it instead. Error was: %v", h.connIndex, err) + h.observer.log.Error().Msgf("Couldn't reconnect connection %d. 
Reregistering it instead. Error was: %v", h.connIndex, err) } return h.registerTunnel(ctx, credentialManager, classicTunnel, registrationOptions) }) @@ -212,9 +213,9 @@ func (rp *h2muxRespWriter) WriteRespHeaders(resp *http.Response) error { } func (rp *h2muxRespWriter) WriteErrorResponse() { - rp.WriteHeaders([]h2mux.Header{ + _ = rp.WriteHeaders([]h2mux.Header{ {Name: ":status", Value: "502"}, {Name: ResponseMetaHeaderField, Value: responseMetaHeaderCfd}, }) - rp.Write([]byte("502 Bad Gateway")) + _, _ = rp.Write([]byte("502 Bad Gateway")) } diff --git a/connection/h2mux_test.go b/connection/h2mux_test.go index 4acd1c2b..b2f60383 100644 --- a/connection/h2mux_test.go +++ b/connection/h2mux_test.go @@ -31,7 +31,7 @@ func newH2MuxConnection(ctx context.Context, t require.TestingT) (*h2muxConnecti edgeMuxChan := make(chan *h2mux.Muxer) go func() { edgeMuxConfig := h2mux.MuxerConfig{ - Logger: testObserver, + Log: testObserver.log, } edgeMux, err := h2mux.Handshake(edgeConn, edgeConn, edgeMuxConfig, h2mux.ActiveStreams) require.NoError(t, err) @@ -85,7 +85,7 @@ func TestServeStreamHTTP(t *testing.T) { wg.Add(2) go func() { defer wg.Done() - edgeMux.Serve(ctx) + _ = edgeMux.Serve(ctx) }() go func() { defer wg.Done() diff --git a/connection/http2.go b/connection/http2.go index f148c91b..f9c1e0c0 100644 --- a/connection/http2.go +++ b/connection/http2.go @@ -11,9 +11,9 @@ import ( "sync" "github.com/cloudflare/cloudflared/h2mux" - "github.com/cloudflare/cloudflared/logger" tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" + "github.com/rs/zerolog" "golang.org/x/net/http2" ) @@ -38,7 +38,7 @@ type http2Connection struct { connIndex uint8 wg *sync.WaitGroup // newRPCClientFunc allows us to mock RPCs during testing - newRPCClientFunc func(context.Context, io.ReadWriteCloser, logger.Service) NamedTunnelRPCClient + newRPCClientFunc func(context.Context, io.ReadWriteCloser, *zerolog.Logger) NamedTunnelRPCClient connectedFuse ConnectedFuse } @@ -89,7 +89,7 @@ func 
(c *http2Connection) ServeHTTP(w http.ResponseWriter, r *http.Request) { } flusher, isFlusher := w.(http.Flusher) if !isFlusher { - c.observer.Errorf("%T doesn't implement http.Flusher", w) + c.observer.log.Error().Msgf("%T doesn't implement http.Flusher", w) respWriter.WriteErrorResponse() return } @@ -112,7 +112,7 @@ func (c *http2Connection) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (c *http2Connection) serveControlStream(ctx context.Context, respWriter *http2RespWriter) error { - rpcClient := c.newRPCClientFunc(ctx, respWriter, c.observer) + rpcClient := c.newRPCClientFunc(ctx, respWriter, c.observer.log) defer rpcClient.Close() if err := rpcClient.RegisterConnection(ctx, c.namedTunnel, c.connOptions, c.connIndex, c.observer); err != nil { diff --git a/connection/http2_test.go b/connection/http2_test.go index 4b2439f9..2d3c30cb 100644 --- a/connection/http2_test.go +++ b/connection/http2_test.go @@ -12,10 +12,11 @@ import ( "testing" "time" - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/tunnelrpc/pogs" tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" + "github.com/gobwas/ws/wsutil" + "github.com/rs/zerolog" "github.com/stretchr/testify/require" "golang.org/x/net/http2" ) @@ -136,7 +137,7 @@ type mockRPCClientFactory struct { unregistered chan struct{} } -func (mf *mockRPCClientFactory) newMockRPCClient(context.Context, io.ReadWriteCloser, logger.Service) NamedTunnelRPCClient { +func (mf *mockRPCClientFactory) newMockRPCClient(context.Context, io.ReadWriteCloser, *zerolog.Logger) NamedTunnelRPCClient { return mockNamedTunnelRPCClient{ registered: mf.registered, unregistered: mf.unregistered, diff --git a/connection/observer.go b/connection/observer.go index b6d9aeaa..12324ef1 100644 --- a/connection/observer.go +++ b/connection/observer.go @@ -5,20 +5,21 @@ import ( "net/url" "strings" - "github.com/cloudflare/cloudflared/logger" tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" + + 
"github.com/rs/zerolog" ) type Observer struct { - logger.Service + log *zerolog.Logger metrics *tunnelMetrics tunnelEventChans []chan Event uiEnabled bool } -func NewObserver(logger logger.Service, tunnelEventChans []chan Event, uiEnabled bool) *Observer { +func NewObserver(log *zerolog.Logger, tunnelEventChans []chan Event, uiEnabled bool) *Observer { return &Observer{ - logger, + log, newTunnelMetrics(), tunnelEventChans, uiEnabled, @@ -27,7 +28,7 @@ func NewObserver(logger logger.Service, tunnelEventChans []chan Event, uiEnabled func (o *Observer) logServerInfo(connIndex uint8, location, msg string) { o.sendEvent(Event{Index: connIndex, EventType: Connected, Location: location}) - o.Infof(msg) + o.log.Info().Msgf(msg) o.metrics.registerServerLocation(uint8ToString(connIndex), location) } @@ -36,10 +37,10 @@ func (o *Observer) logTrialHostname(registration *tunnelpogs.TunnelRegistration) if !o.uiEnabled { if registrationURL, err := url.Parse(registration.Url); err == nil { for _, line := range asciiBox(trialZoneMsg(registrationURL.String()), 2) { - o.Info(line) + o.log.Info().Msg(line) } } else { - o.Error("Failed to connect tunnel, please try again.") + o.log.Error().Msg("Failed to connect tunnel, please try again.") return fmt.Errorf("empty URL in response from Cloudflare edge") } } diff --git a/connection/protocol.go b/connection/protocol.go index a4da1578..8e12f29c 100644 --- a/connection/protocol.go +++ b/connection/protocol.go @@ -6,7 +6,7 @@ import ( "sync" "time" - "github.com/cloudflare/cloudflared/logger" + "github.com/rs/zerolog" ) const ( @@ -89,7 +89,7 @@ type autoProtocolSelector struct { fetchFunc PercentageFetcher refreshAfter time.Time ttl time.Duration - logger logger.Service + log *zerolog.Logger } func newAutoProtocolSelector( @@ -97,7 +97,7 @@ func newAutoProtocolSelector( switchThrehold int32, fetchFunc PercentageFetcher, ttl time.Duration, - logger logger.Service, + log *zerolog.Logger, ) *autoProtocolSelector { return 
&autoProtocolSelector{ current: current, @@ -105,7 +105,7 @@ func newAutoProtocolSelector( fetchFunc: fetchFunc, refreshAfter: time.Now().Add(ttl), ttl: ttl, - logger: logger, + log: log, } } @@ -118,7 +118,7 @@ func (s *autoProtocolSelector) Current() Protocol { percentage, err := s.fetchFunc() if err != nil { - s.logger.Errorf("Failed to refresh protocol, err: %v", err) + s.log.Error().Msgf("Failed to refresh protocol, err: %v", err) return s.current } @@ -139,7 +139,13 @@ func (s *autoProtocolSelector) Fallback() (Protocol, bool) { type PercentageFetcher func() (int32, error) -func NewProtocolSelector(protocolFlag string, namedTunnel *NamedTunnelConfig, fetchFunc PercentageFetcher, ttl time.Duration, logger logger.Service) (ProtocolSelector, error) { +func NewProtocolSelector( + protocolFlag string, + namedTunnel *NamedTunnelConfig, + fetchFunc PercentageFetcher, + ttl time.Duration, + log *zerolog.Logger, +) (ProtocolSelector, error) { if namedTunnel == nil { return &staticProtocolSelector{ current: H2mux, @@ -157,9 +163,9 @@ func NewProtocolSelector(protocolFlag string, namedTunnel *NamedTunnelConfig, fe } if protocolFlag == HTTP2.String() { if http2Percentage < 0 { - return newAutoProtocolSelector(H2mux, explicitHTTP2FallbackThreshold, fetchFunc, ttl, logger), nil + return newAutoProtocolSelector(H2mux, explicitHTTP2FallbackThreshold, fetchFunc, ttl, log), nil } - return newAutoProtocolSelector(HTTP2, explicitHTTP2FallbackThreshold, fetchFunc, ttl, logger), nil + return newAutoProtocolSelector(HTTP2, explicitHTTP2FallbackThreshold, fetchFunc, ttl, log), nil } if protocolFlag != autoSelectFlag { @@ -167,13 +173,13 @@ func NewProtocolSelector(protocolFlag string, namedTunnel *NamedTunnelConfig, fe } threshold := switchThreshold(namedTunnel.Credentials.AccountTag) if threshold < http2Percentage { - return newAutoProtocolSelector(HTTP2, threshold, fetchFunc, ttl, logger), nil + return newAutoProtocolSelector(HTTP2, threshold, fetchFunc, ttl, log), nil } - return 
newAutoProtocolSelector(H2mux, threshold, fetchFunc, ttl, logger), nil + return newAutoProtocolSelector(H2mux, threshold, fetchFunc, ttl, log), nil } func switchThreshold(accountTag string) int32 { h := fnv.New32a() - h.Write([]byte(accountTag)) + _, _ = h.Write([]byte(accountTag)) return int32(h.Sum32() % 100) } diff --git a/connection/protocol_test.go b/connection/protocol_test.go index a3d856c0..2100a32e 100644 --- a/connection/protocol_test.go +++ b/connection/protocol_test.go @@ -5,7 +5,6 @@ import ( "testing" "time" - "github.com/cloudflare/cloudflared/logger" "github.com/stretchr/testify/assert" ) @@ -130,9 +129,9 @@ func TestNewProtocolSelector(t *testing.T) { wantErr: true, }, } - logger, _ := logger.New() + for _, test := range tests { - selector, err := NewProtocolSelector(test.protocol, test.namedTunnelConfig, test.fetchFunc, testNoTTL, logger) + selector, err := NewProtocolSelector(test.protocol, test.namedTunnelConfig, test.fetchFunc, testNoTTL, &log) if test.wantErr { assert.Error(t, err, fmt.Sprintf("test %s failed", test.name)) } else { @@ -148,9 +147,8 @@ func TestNewProtocolSelector(t *testing.T) { } func TestAutoProtocolSelectorRefresh(t *testing.T) { - logger, _ := logger.New() fetcher := dynamicMockFetcher{} - selector, err := NewProtocolSelector("auto", testNamedTunnelConfig, fetcher.fetch(), testNoTTL, logger) + selector, err := NewProtocolSelector("auto", testNamedTunnelConfig, fetcher.fetch(), testNoTTL, &log) assert.NoError(t, err) assert.Equal(t, H2mux, selector.Current()) @@ -178,9 +176,8 @@ func TestAutoProtocolSelectorRefresh(t *testing.T) { } func TestHTTP2ProtocolSelectorRefresh(t *testing.T) { - logger, _ := logger.New() fetcher := dynamicMockFetcher{} - selector, err := NewProtocolSelector("http2", testNamedTunnelConfig, fetcher.fetch(), testNoTTL, logger) + selector, err := NewProtocolSelector("http2", testNamedTunnelConfig, fetcher.fetch(), testNoTTL, &log) assert.NoError(t, err) assert.Equal(t, HTTP2, selector.Current()) @@ 
-208,9 +205,8 @@ func TestHTTP2ProtocolSelectorRefresh(t *testing.T) { } func TestProtocolSelectorRefreshTTL(t *testing.T) { - logger, _ := logger.New() fetcher := dynamicMockFetcher{percentage: 100} - selector, err := NewProtocolSelector("auto", testNamedTunnelConfig, fetcher.fetch(), time.Hour, logger) + selector, err := NewProtocolSelector("auto", testNamedTunnelConfig, fetcher.fetch(), time.Hour, &log) assert.NoError(t, err) assert.Equal(t, HTTP2, selector.Current()) diff --git a/connection/rpc.go b/connection/rpc.go index adb9e10d..b4de0ba0 100644 --- a/connection/rpc.go +++ b/connection/rpc.go @@ -6,9 +6,10 @@ import ( "io" "time" - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/tunnelrpc" tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" + + "github.com/rs/zerolog" "zombiezen.com/go/capnproto2/rpc" ) @@ -22,12 +23,12 @@ type tunnelServerClient struct { func NewTunnelServerClient( ctx context.Context, stream io.ReadWriteCloser, - logger logger.Service, + log *zerolog.Logger, ) *tunnelServerClient { - transport := tunnelrpc.NewTransportLogger(logger, rpc.StreamTransport(stream)) + transport := tunnelrpc.NewTransportLogger(log, rpc.StreamTransport(stream)) conn := rpc.NewConn( transport, - tunnelrpc.ConnLog(logger), + tunnelrpc.ConnLog(log), ) registrationClient := tunnelpogs.RegistrationServer_PogsClient{Client: conn.Bootstrap(ctx), Conn: conn} return &tunnelServerClient{ @@ -46,8 +47,8 @@ func (tsc *tunnelServerClient) Authenticate(ctx context.Context, classicTunnel * func (tsc *tunnelServerClient) Close() { // Closing the client will also close the connection - tsc.client.Close() - tsc.transport.Close() + _ = tsc.client.Close() + _ = tsc.transport.Close() } type NamedTunnelRPCClient interface { @@ -70,12 +71,12 @@ type registrationServerClient struct { func newRegistrationRPCClient( ctx context.Context, stream io.ReadWriteCloser, - logger logger.Service, + log *zerolog.Logger, ) NamedTunnelRPCClient { - transport := 
tunnelrpc.NewTransportLogger(logger, rpc.StreamTransport(stream)) + transport := tunnelrpc.NewTransportLogger(log, rpc.StreamTransport(stream)) conn := rpc.NewConn( transport, - tunnelrpc.ConnLog(logger), + tunnelrpc.ConnLog(log), ) return &registrationServerClient{ client: tunnelpogs.RegistrationServer_PogsClient{Client: conn.Bootstrap(ctx), Conn: conn}, @@ -117,14 +118,14 @@ func (rsc *registrationServerClient) GracefulShutdown(ctx context.Context, gracePeriod time.Duration) { ctx, cancel := context.WithTimeout(ctx, gracePeriod) defer cancel() - rsc.client.UnregisterConnection(ctx) + _ = rsc.client.UnregisterConnection(ctx) } func (rsc *registrationServerClient) Close() { // Closing the client will also close the connection - rsc.client.Close() + _ = rsc.client.Close() // Closing the transport also closes the stream - rsc.transport.Close() + _ = rsc.transport.Close() } type rpcName string @@ -143,10 +144,10 @@ func (h *h2muxConnection) registerTunnel(ctx context.Context, credentialSetter C if err != nil { return err } - rpcClient := NewTunnelServerClient(ctx, stream, h.observer) + rpcClient := NewTunnelServerClient(ctx, stream, h.observer.log) defer rpcClient.Close() - h.logServerInfo(ctx, rpcClient) + _ = h.logServerInfo(ctx, rpcClient) registration := rpcClient.client.RegisterTunnel( ctx, classicTunnel.OriginCert, @@ -178,12 +179,12 @@ func (h *h2muxConnection) processRegistrationSuccess( credentialManager CredentialManager, classicTunnel *ClassicTunnelConfig, ) error { for _, logLine := range registration.LogLines { - h.observer.Info(logLine) + h.observer.log.Info().Msg(logLine) } if registration.TunnelID != "" { h.observer.metrics.tunnelsHA.AddTunnelID(h.connIndex, registration.TunnelID) - h.observer.Infof("Each HA connection's tunnel IDs: %v", h.observer.metrics.tunnelsHA.String()) + h.observer.log.Info().Msgf("Each HA connection's tunnel IDs: %v", h.observer.metrics.tunnelsHA.String()) } // Print out the
user's trial zone URL in a nice box (if they requested and got one and UI flag is not set) @@ -197,7 +198,7 @@ func (h *h2muxConnection) processRegistrationSuccess( credentialManager.SetConnDigest(h.connIndex, registration.ConnDigest) h.observer.metrics.userHostnamesCounts.WithLabelValues(registration.Url).Inc() - h.observer.Infof("Route propagating, it may take up to 1 minute for your new route to become functional") + h.observer.log.Info().Msgf("Route propagating, it may take up to 1 minute for your new route to become functional") h.observer.metrics.regSuccess.WithLabelValues(string(name)).Inc() return nil } @@ -228,15 +229,15 @@ func (h *h2muxConnection) reconnectTunnel(ctx context.Context, credentialManager return err } - h.observer.Debug("initiating RPC stream to reconnect") + h.observer.log.Debug().Msg("initiating RPC stream to reconnect") stream, err := h.newRPCStream(ctx, register) if err != nil { return err } - rpcClient := NewTunnelServerClient(ctx, stream, h.observer) + rpcClient := NewTunnelServerClient(ctx, stream, h.observer.log) defer rpcClient.Close() - h.logServerInfo(ctx, rpcClient) + _ = h.logServerInfo(ctx, rpcClient) registration := rpcClient.client.ReconnectTunnel( ctx, token, @@ -259,15 +260,15 @@ func (h *h2muxConnection) logServerInfo(ctx context.Context, rpcClient *tunnelSe }) serverInfoMessage, err := serverInfoPromise.Result().Struct() if err != nil { - h.observer.Errorf("Failed to retrieve server information: %s", err) + h.observer.log.Error().Msgf("Failed to retrieve server information: %s", err) return err } serverInfo, err := tunnelpogs.UnmarshalServerInfo(serverInfoMessage) if err != nil { - h.observer.Errorf("Failed to retrieve server information: %s", err) + h.observer.log.Error().Msgf("Failed to retrieve server information: %s", err) return err } - h.observer.logServerInfo(h.connIndex, serverInfo.LocationName, fmt.Sprintf("Connnection %d connected to %s", h.connIndex, serverInfo.LocationName)) + 
h.observer.logServerInfo(h.connIndex, serverInfo.LocationName, fmt.Sprintf("Connection %d connected to %s", h.connIndex, serverInfo.LocationName)) return nil } @@ -281,15 +282,15 @@ func (h *h2muxConnection) unregister(isNamedTunnel bool) { } if isNamedTunnel { - rpcClient := newRegistrationRPCClient(unregisterCtx, stream, h.observer) + rpcClient := newRegistrationRPCClient(unregisterCtx, stream, h.observer.log) defer rpcClient.Close() rpcClient.GracefulShutdown(unregisterCtx, h.config.GracePeriod) } else { - rpcClient := NewTunnelServerClient(unregisterCtx, stream, h.observer) + rpcClient := NewTunnelServerClient(unregisterCtx, stream, h.observer.log) defer rpcClient.Close() // gracePeriod is encoded in int64 using capnproto - rpcClient.client.UnregisterTunnel(unregisterCtx, h.config.GracePeriod.Nanoseconds()) + _ = rpcClient.client.UnregisterTunnel(unregisterCtx, h.config.GracePeriod.Nanoseconds()) } } diff --git a/dbconnect/proxy.go b/dbconnect/proxy.go index fc45676e..615f1305 100644 --- a/dbconnect/proxy.go +++ b/dbconnect/proxy.go @@ -8,20 +8,21 @@ import ( "net" "net/http" "net/url" + "os" "time" "github.com/cloudflare/cloudflared/hello" - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/validation" "github.com/gorilla/mux" "github.com/pkg/errors" + "github.com/rs/zerolog" ) // Proxy is an HTTP server that proxies requests to a Client. type Proxy struct { client Client accessValidator *validation.Access - logger logger.Service + log *zerolog.Logger } // NewInsecureProxy creates a Proxy that talks to a Client at an origin. 
@@ -43,12 +44,9 @@ func NewInsecureProxy(ctx context.Context, origin string) (*Proxy, error) { return nil, errors.Wrap(err, "could not connect to the database") } - logger, err := logger.New() // TODO: Does not obey log configuration - if err != nil { - return nil, errors.Wrap(err, "error setting up logger") - } + log := zerolog.New(os.Stderr).With().Logger() // TODO: Does not obey log configuration - return &Proxy{client, nil, logger}, nil + return &Proxy{client, nil, &log}, nil } // NewSecureProxy creates a Proxy that talks to a Client at an origin. @@ -96,7 +94,7 @@ func (proxy *Proxy) IsAllowed(r *http.Request, verbose ...bool) bool { // of either a misconfiguration of the CLI or a massive failure of upstream systems. if len(verbose) > 0 { cfRay := proxy.getRayHeader(r) - proxy.logger.Infof("dbproxy: Failed JWT authentication: cf-ray: %s %s", cfRay, err) + proxy.log.Info().Msgf("dbproxy: Failed JWT authentication: cf-ray: %s %s", cfRay, err) } return false @@ -151,8 +149,8 @@ func (proxy *Proxy) httpListen(ctx context.Context, listener net.Listener) error go func() { <-ctx.Done() - httpServer.Close() - listener.Close() + _ = httpServer.Close() + _ = listener.Close() }() return httpServer.Serve(listener) @@ -241,7 +239,7 @@ func (proxy *Proxy) httpRespondErr(w http.ResponseWriter, r *http.Request, defau proxy.httpRespond(w, r, status, err.Error()) if len(err.Error()) > 0 { cfRay := proxy.getRayHeader(r) - proxy.logger.Infof("dbproxy: Database proxy error: cf-ray: %s %s", cfRay, err) + proxy.log.Info().Msgf("dbproxy: Database proxy error: cf-ray: %s %s", cfRay, err) } } diff --git a/dbconnect/sql.go b/dbconnect/sql.go index 2bd5b56f..6939b0d0 100644 --- a/dbconnect/sql.go +++ b/dbconnect/sql.go @@ -46,7 +46,7 @@ func NewSQLClient(ctx context.Context, originURL *url.URL) (Client, error) { // Closes the driver, will occur when the context finishes. 
go func() { <-ctx.Done() - driver.Close() + _ = driver.Close() }() return &SQLClient{driver.DriverName(), driver}, nil @@ -260,7 +260,7 @@ func sqlRows(rows *sql.Rows) ([]map[string]interface{}, error) { for i := range columns { pointers[i] = &values[i] } - rows.Scan(pointers...) + _ = rows.Scan(pointers...) // Convert a row, an array of values, into an object where // each key is the name of its respective column. diff --git a/edgediscovery/allregions/discovery.go b/edgediscovery/allregions/discovery.go index 9c2b1275..665920a3 100644 --- a/edgediscovery/allregions/discovery.go +++ b/edgediscovery/allregions/discovery.go @@ -7,8 +7,8 @@ import ( "net" "time" - "github.com/cloudflare/cloudflared/logger" "github.com/pkg/errors" + "github.com/rs/zerolog" ) const ( @@ -58,15 +58,15 @@ var friendlyDNSErrorLines = []string{ } // EdgeDiscovery implements HA service discovery lookup. -func edgeDiscovery(logger logger.Service) ([][]*net.TCPAddr, error) { +func edgeDiscovery(log *zerolog.Logger) ([][]*net.TCPAddr, error) { _, addrs, err := netLookupSRV(srvService, srvProto, srvName) if err != nil { _, fallbackAddrs, fallbackErr := fallbackLookupSRV(srvService, srvProto, srvName) if fallbackErr != nil || len(fallbackAddrs) == 0 { // use the original DNS error `err` in messages, not `fallbackErr` - logger.Errorf("Error looking up Cloudflare edge IPs: the DNS query failed: %s", err) + log.Error().Msgf("Error looking up Cloudflare edge IPs: the DNS query failed: %s", err) for _, s := range friendlyDNSErrorLines { - logger.Error(s) + log.Error().Msg(s) } return nil, errors.Wrapf(err, "Could not lookup srv records on _%v._%v.%v", srvService, srvProto, srvName) } @@ -122,11 +122,11 @@ func resolveSRVToTCP(srv *net.SRV) ([]*net.TCPAddr, error) { // ResolveAddrs resolves TCP address given a list of addresses. Address can be a hostname, however, it will return at most one // of the hostname's IP addresses. 
-func ResolveAddrs(addrs []string, logger logger.Service) (resolved []*net.TCPAddr) { +func ResolveAddrs(addrs []string, log *zerolog.Logger) (resolved []*net.TCPAddr) { for _, addr := range addrs { tcpAddr, err := net.ResolveTCPAddr("tcp", addr) if err != nil { - logger.Errorf("Failed to resolve %s, err: %v", addr, err) + log.Error().Msgf("Failed to resolve %s, err: %v", addr, err) } else { resolved = append(resolved, tcpAddr) } diff --git a/edgediscovery/allregions/discovery_test.go b/edgediscovery/allregions/discovery_test.go index b2be4350..82f03ac1 100644 --- a/edgediscovery/allregions/discovery_test.go +++ b/edgediscovery/allregions/discovery_test.go @@ -3,7 +3,7 @@ package allregions import ( "testing" - "github.com/cloudflare/cloudflared/logger" + "github.com/rs/zerolog" "github.com/stretchr/testify/assert" ) @@ -19,8 +19,8 @@ func TestEdgeDiscovery(t *testing.T) { } } - l := logger.NewOutputWriter(logger.NewMockWriteManager()) - addrLists, err := edgeDiscovery(l) + l := zerolog.Nop() + addrLists, err := edgeDiscovery(&l) assert.NoError(t, err) actualAddrSet := map[string]bool{} for _, addrs := range addrLists { diff --git a/edgediscovery/allregions/region.go b/edgediscovery/allregions/region.go index aee3fd93..30b19808 100644 --- a/edgediscovery/allregions/region.go +++ b/edgediscovery/allregions/region.go @@ -57,7 +57,6 @@ func (r Region) GetUnusedIP(excluding *net.TCPAddr) *net.TCPAddr { // Use the address, assigning it to a proxy connection. 
func (r Region) Use(addr *net.TCPAddr, connID int) { if addr == nil { - //logrus.Errorf("Attempted to use nil address for connection %d", connID) return } r.connFor[addr] = InUse(connID) diff --git a/edgediscovery/allregions/regions.go b/edgediscovery/allregions/regions.go index eeed1b0c..cabb871c 100644 --- a/edgediscovery/allregions/regions.go +++ b/edgediscovery/allregions/regions.go @@ -4,7 +4,7 @@ import ( "fmt" "net" - "github.com/cloudflare/cloudflared/logger" + "github.com/rs/zerolog" ) // Regions stores Cloudflare edge network IPs, partitioned into two regions. @@ -19,8 +19,8 @@ type Regions struct { // ------------------------------------ // ResolveEdge resolves the Cloudflare edge, returning all regions discovered. -func ResolveEdge(logger logger.Service) (*Regions, error) { - addrLists, err := edgeDiscovery(logger) +func ResolveEdge(log *zerolog.Logger) (*Regions, error) { + addrLists, err := edgeDiscovery(log) if err != nil { return nil, err } @@ -35,8 +35,8 @@ func ResolveEdge(logger logger.Service) (*Regions, error) { // StaticEdge creates a list of edge addresses from the list of hostnames. // Mainly used for testing connectivity. 
-func StaticEdge(hostnames []string, logger logger.Service) (*Regions, error) { - resolved := ResolveAddrs(hostnames, logger) +func StaticEdge(hostnames []string, log *zerolog.Logger) (*Regions, error) { + resolved := ResolveAddrs(hostnames, log) if len(resolved) == 0 { return nil, fmt.Errorf("failed to resolve any edge address") } diff --git a/edgediscovery/edgediscovery.go b/edgediscovery/edgediscovery.go index df4ff01a..261d52bf 100644 --- a/edgediscovery/edgediscovery.go +++ b/edgediscovery/edgediscovery.go @@ -6,7 +6,7 @@ import ( "sync" "github.com/cloudflare/cloudflared/edgediscovery/allregions" - "github.com/cloudflare/cloudflared/logger" + "github.com/rs/zerolog" ) const ( @@ -19,7 +19,7 @@ var errNoAddressesLeft = fmt.Errorf("There are no free edge addresses left") type Edge struct { regions *allregions.Regions sync.Mutex - logger logger.Service + log *zerolog.Logger } // ------------------------------------ @@ -28,34 +28,34 @@ type Edge struct { // ResolveEdge runs the initial discovery of the Cloudflare edge, finding Addrs that can be allocated // to connections. -func ResolveEdge(l logger.Service) (*Edge, error) { - regions, err := allregions.ResolveEdge(l) +func ResolveEdge(log *zerolog.Logger) (*Edge, error) { + regions, err := allregions.ResolveEdge(log) if err != nil { return new(Edge), err } return &Edge{ - logger: l, + log: log, regions: regions, }, nil } // StaticEdge creates a list of edge addresses from the list of hostnames. Mainly used for testing connectivity. -func StaticEdge(l logger.Service, hostnames []string) (*Edge, error) { - regions, err := allregions.StaticEdge(hostnames, l) +func StaticEdge(log *zerolog.Logger, hostnames []string) (*Edge, error) { + regions, err := allregions.StaticEdge(hostnames, log) if err != nil { return new(Edge), err } return &Edge{ - logger: l, + log: log, regions: regions, }, nil } // MockEdge creates a Cloudflare Edge from arbitrary TCP addresses. Used for testing. 
-func MockEdge(l logger.Service, addrs []*net.TCPAddr) *Edge { +func MockEdge(log *zerolog.Logger, addrs []*net.TCPAddr) *Edge { regions := allregions.NewNoResolve(addrs) return &Edge{ - logger: l, + log: log, regions: regions, } } @@ -82,17 +82,17 @@ func (ed *Edge) GetAddr(connID int) (*net.TCPAddr, error) { // If this connection has already used an edge addr, return it. if addr := ed.regions.AddrUsedBy(connID); addr != nil { - ed.logger.Debugf("edgediscovery - GetAddr: Returning same address back to proxy connection: connID: %d", connID) + ed.log.Debug().Msgf("edgediscovery - GetAddr: Returning same address back to proxy connection: connID: %d", connID) return addr, nil } // Otherwise, give it an unused one addr := ed.regions.GetUnusedAddr(nil, connID) if addr == nil { - ed.logger.Debugf("edgediscovery - GetAddr: No addresses left to give proxy connection: connID: %d", connID) + ed.log.Debug().Msgf("edgediscovery - GetAddr: No addresses left to give proxy connection: connID: %d", connID) return nil, errNoAddressesLeft } - ed.logger.Debugf("edgediscovery - GetAddr: Giving connection its new address %s: connID: %d", addr, connID) + ed.log.Debug().Msgf("edgediscovery - GetAddr: Giving connection its new address %s: connID: %d", addr, connID) return addr, nil } @@ -107,11 +107,11 @@ func (ed *Edge) GetDifferentAddr(connID int) (*net.TCPAddr, error) { } addr := ed.regions.GetUnusedAddr(oldAddr, connID) if addr == nil { - ed.logger.Debugf("edgediscovery - GetDifferentAddr: No addresses left to give proxy connection: connID: %d", connID) + ed.log.Debug().Msgf("edgediscovery - GetDifferentAddr: No addresses left to give proxy connection: connID: %d", connID) // note: if oldAddr were not nil, it will become available on the next iteration return nil, errNoAddressesLeft } - ed.logger.Debugf("edgediscovery - GetDifferentAddr: Giving connection its new address %s: connID: %d", addr, connID) + ed.log.Debug().Msgf("edgediscovery - GetDifferentAddr: Giving connection its new 
address %s: connID: %d", addr, connID) return addr, nil } @@ -127,6 +127,6 @@ func (ed *Edge) AvailableAddrs() int { func (ed *Edge) GiveBack(addr *net.TCPAddr) bool { ed.Lock() defer ed.Unlock() - ed.logger.Debug("edgediscovery - GiveBack: Address now unused") + ed.log.Debug().Msg("edgediscovery - GiveBack: Address now unused") return ed.regions.GiveBack(addr) } diff --git a/edgediscovery/edgediscovery_test.go b/edgediscovery/edgediscovery_test.go index 3439861a..e52142a1 100644 --- a/edgediscovery/edgediscovery_test.go +++ b/edgediscovery/edgediscovery_test.go @@ -4,7 +4,7 @@ import ( "net" "testing" - "github.com/cloudflare/cloudflared/logger" + "github.com/rs/zerolog" "github.com/stretchr/testify/assert" ) @@ -29,11 +29,12 @@ var ( Port: 8000, Zone: "", } + + log = zerolog.Nop() ) func TestGiveBack(t *testing.T) { - l := logger.NewOutputWriter(logger.NewMockWriteManager()) - edge := MockEdge(l, []*net.TCPAddr{&addr0, &addr1, &addr2, &addr3}) + edge := MockEdge(&log, []*net.TCPAddr{&addr0, &addr1, &addr2, &addr3}) // Give this connection an address assert.Equal(t, 4, edge.AvailableAddrs()) @@ -49,10 +50,8 @@ func TestGiveBack(t *testing.T) { } func TestRPCAndProxyShareSingleEdgeIP(t *testing.T) { - l := logger.NewOutputWriter(logger.NewMockWriteManager()) - // Make an edge with a single IP - edge := MockEdge(l, []*net.TCPAddr{&addr0}) + edge := MockEdge(&log, []*net.TCPAddr{&addr0}) tunnelConnID := 0 // Use the IP for a tunnel @@ -66,8 +65,7 @@ func TestRPCAndProxyShareSingleEdgeIP(t *testing.T) { } func TestGetAddrForRPC(t *testing.T) { - l := logger.NewOutputWriter(logger.NewMockWriteManager()) - edge := MockEdge(l, []*net.TCPAddr{&addr0, &addr1, &addr2, &addr3}) + edge := MockEdge(&log, []*net.TCPAddr{&addr0, &addr1, &addr2, &addr3}) // Get a connection assert.Equal(t, 4, edge.AvailableAddrs()) @@ -84,10 +82,8 @@ func TestGetAddrForRPC(t *testing.T) { } func TestOnePerRegion(t *testing.T) { - l := logger.NewOutputWriter(logger.NewMockWriteManager()) - // Make 
an edge with only one address - edge := MockEdge(l, []*net.TCPAddr{&addr0, &addr1}) + edge := MockEdge(&log, []*net.TCPAddr{&addr0, &addr1}) // Use the only address const connID = 0 @@ -108,10 +104,8 @@ func TestOnePerRegion(t *testing.T) { } func TestOnlyOneAddrLeft(t *testing.T) { - l := logger.NewOutputWriter(logger.NewMockWriteManager()) - // Make an edge with only one address - edge := MockEdge(l, []*net.TCPAddr{&addr0}) + edge := MockEdge(&log, []*net.TCPAddr{&addr0}) // Use the only address const connID = 0 @@ -130,10 +124,8 @@ func TestOnlyOneAddrLeft(t *testing.T) { } func TestNoAddrsLeft(t *testing.T) { - l := logger.NewOutputWriter(logger.NewMockWriteManager()) - // Make an edge with no addresses - edge := MockEdge(l, []*net.TCPAddr{}) + edge := MockEdge(&log, []*net.TCPAddr{}) _, err := edge.GetAddr(2) assert.Error(t, err) @@ -142,8 +134,7 @@ func TestNoAddrsLeft(t *testing.T) { } func TestGetAddr(t *testing.T) { - l := logger.NewOutputWriter(logger.NewMockWriteManager()) - edge := MockEdge(l, []*net.TCPAddr{&addr0, &addr1, &addr2, &addr3}) + edge := MockEdge(&log, []*net.TCPAddr{&addr0, &addr1, &addr2, &addr3}) // Give this connection an address const connID = 0 @@ -158,8 +149,7 @@ func TestGetAddr(t *testing.T) { } func TestGetDifferentAddr(t *testing.T) { - l := logger.NewOutputWriter(logger.NewMockWriteManager()) - edge := MockEdge(l, []*net.TCPAddr{&addr0, &addr1, &addr2, &addr3}) + edge := MockEdge(&log, []*net.TCPAddr{&addr0, &addr1, &addr2, &addr3}) // Give this connection an address assert.Equal(t, 4, edge.AvailableAddrs()) diff --git a/go.mod b/go.mod index a64e88a6..0ed45f2f 100644 --- a/go.mod +++ b/go.mod @@ -50,6 +50,7 @@ require ( github.com/prometheus/client_golang v1.7.1 github.com/prometheus/common v0.13.0 // indirect github.com/rivo/tview v0.0.0-20200712113419-c65badfc3d92 + github.com/rs/zerolog v1.20.0 github.com/stretchr/testify v1.6.0 github.com/urfave/cli/v2 v2.2.0 github.com/xo/dburl v0.0.0-20191005012637-293c3298d6c0 diff --git 
a/go.sum b/go.sum index 09fd6d7f..0b63b362 100644 --- a/go.sum +++ b/go.sum @@ -143,6 +143,7 @@ github.com/coreos/go-oidc v0.0.0-20171002155002-a93f71fdfe73 h1:7CNPV0LWRCa1FNmq github.com/coreos/go-oidc v0.0.0-20171002155002-a93f71fdfe73/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= @@ -565,6 +566,9 @@ github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY= github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.20.0 h1:38k9hgtUBdxFwE34yS8rTHmHBa4eN16E4DJlv177LNs= +github.com/rs/zerolog v1.20.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo= github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4 h1:S9YlS71UNJIyS61OqGAmLXv3w5zclSidN+qwr80XxKs= github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q= @@ -837,6 +841,7 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod 
h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= diff --git a/h2mux/h2mux.go b/h2mux/h2mux.go index e1b86411..c1bf05bb 100644 --- a/h2mux/h2mux.go +++ b/h2mux/h2mux.go @@ -8,11 +8,10 @@ import ( "time" "github.com/prometheus/client_golang/prometheus" + "github.com/rs/zerolog" "golang.org/x/net/http2" "golang.org/x/net/http2/hpack" "golang.org/x/sync/errgroup" - - "github.com/cloudflare/cloudflared/logger" ) const ( @@ -50,7 +49,7 @@ type MuxerConfig struct { // The minimum number of heartbeats to send before terminating the connection. 
MaxHeartbeats uint64 // Logger to use - Logger logger.Service + Log *zerolog.Logger CompressionQuality CompressionSetting // Initial size for HTTP2 flow control windows DefaultWindowSize uint32 @@ -138,10 +137,10 @@ func Handshake( handshakeSetting := http2.Setting{ID: SettingMuxerMagic, Val: MuxerMagicEdge} compressionSetting := http2.Setting{ID: SettingCompression, Val: config.CompressionQuality.toH2Setting()} if CompressionIsSupported() { - config.Logger.Debug("muxer: Compression is supported") + config.Log.Debug().Msg("muxer: Compression is supported") m.compressionQuality = config.CompressionQuality.getPreset() } else { - config.Logger.Debug("muxer: Compression is not supported") + config.Log.Debug().Msg("muxer: Compression is not supported") compressionSetting = http2.Setting{ID: SettingCompression, Val: 0} } @@ -178,12 +177,12 @@ func Handshake( // Sanity check to enusre idelDuration is sane if idleDuration == 0 || idleDuration < defaultTimeout { idleDuration = defaultTimeout - config.Logger.Infof("muxer: Minimum idle time has been adjusted to %d", defaultTimeout) + config.Log.Info().Msgf("muxer: Minimum idle time has been adjusted to %d", defaultTimeout) } maxRetries := config.MaxHeartbeats if maxRetries == 0 { maxRetries = defaultRetries - config.Logger.Infof("muxer: Minimum number of unacked heartbeats to send before closing the connection has been adjusted to %d", maxRetries) + config.Log.Info().Msgf("muxer: Minimum number of unacked heartbeats to send before closing the connection has been adjusted to %d", maxRetries) } compBytesBefore, compBytesAfter := NewAtomicCounter(0), NewAtomicCounter(0) @@ -325,7 +324,7 @@ func (m *Muxer) Serve(ctx context.Context) error { errGroup.Go(func() error { ch := make(chan error) go func() { - err := m.muxReader.run(m.config.Logger) + err := m.muxReader.run(m.config.Log) m.explicitShutdown.Fuse(false) m.r.Close() m.abort() @@ -346,7 +345,7 @@ func (m *Muxer) Serve(ctx context.Context) error { errGroup.Go(func() error { 
ch := make(chan error) go func() { - err := m.muxWriter.run(m.config.Logger) + err := m.muxWriter.run(m.config.Log) m.explicitShutdown.Fuse(false) m.w.Close() m.abort() @@ -367,7 +366,7 @@ func (m *Muxer) Serve(ctx context.Context) error { errGroup.Go(func() error { ch := make(chan error) go func() { - err := m.muxMetricsUpdater.run(m.config.Logger) + err := m.muxMetricsUpdater.run(m.config.Log) // don't block if parent goroutine quit early select { case ch <- err: diff --git a/h2mux/h2mux_test.go b/h2mux/h2mux_test.go index 3bef2d6c..180cffe1 100644 --- a/h2mux/h2mux_test.go +++ b/h2mux/h2mux_test.go @@ -16,10 +16,9 @@ import ( "time" "github.com/pkg/errors" + "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "golang.org/x/sync/errgroup" - - "github.com/cloudflare/cloudflared/logger" ) const ( @@ -27,6 +26,8 @@ const ( testHandshakeTimeout = time.Millisecond * 1000 ) +var log = zerolog.Nop() + func TestMain(m *testing.M) { if os.Getenv("VERBOSE") == "1" { //TODO: set log level @@ -52,7 +53,7 @@ func NewDefaultMuxerPair(t assert.TestingT, testName string, f MuxedStreamFunc) Handler: f, IsClient: true, Name: "origin", - Logger: logger.NewOutputWriter(logger.NewMockWriteManager()), + Log: &log, DefaultWindowSize: (1 << 8) - 1, MaxWindowSize: (1 << 15) - 1, StreamWriteBufferMaxLen: 1024, @@ -64,7 +65,7 @@ func NewDefaultMuxerPair(t assert.TestingT, testName string, f MuxedStreamFunc) Timeout: testHandshakeTimeout, IsClient: false, Name: "edge", - Logger: logger.NewOutputWriter(logger.NewMockWriteManager()), + Log: &log, DefaultWindowSize: (1 << 8) - 1, MaxWindowSize: (1 << 15) - 1, StreamWriteBufferMaxLen: 1024, @@ -87,7 +88,7 @@ func NewCompressedMuxerPair(t assert.TestingT, testName string, quality Compress IsClient: true, Name: "origin", CompressionQuality: quality, - Logger: logger.NewOutputWriter(logger.NewMockWriteManager()), + Log: &log, HeartbeatInterval: defaultTimeout, MaxHeartbeats: defaultRetries, }, @@ -97,7 +98,7 @@ func 
NewCompressedMuxerPair(t assert.TestingT, testName string, quality Compress IsClient: false, Name: "edge", CompressionQuality: quality, - Logger: logger.NewOutputWriter(logger.NewMockWriteManager()), + Log: &log, HeartbeatInterval: defaultTimeout, MaxHeartbeats: defaultRetries, }, @@ -186,11 +187,11 @@ func TestSingleStream(t *testing.T) { if stream.Headers[0].Value != "headerValue" { t.Fatalf("expected header value %s, got %s", "headerValue", stream.Headers[0].Value) } - stream.WriteHeaders([]Header{ + _ = stream.WriteHeaders([]Header{ {Name: "response-header", Value: "responseValue"}, }) buf := []byte("Hello world") - stream.Write(buf) + _, _ = stream.Write(buf) n, err := io.ReadFull(stream, buf) if n > 0 { t.Fatalf("read %d bytes after EOF", n) @@ -230,7 +231,7 @@ func TestSingleStream(t *testing.T) { if string(responseBody) != "Hello world" { t.Fatalf("expected response body %s, got %s", "Hello world", responseBody) } - stream.Close() + _ = stream.Close() n, err = stream.Write([]byte("aaaaa")) if n > 0 { t.Fatalf("wrote %d bytes after EOF", n) @@ -252,7 +253,7 @@ func TestSingleStreamLargeResponseBody(t *testing.T) { if stream.Headers[0].Value != "headerValue" { t.Fatalf("expected header value %s, got %s", "headerValue", stream.Headers[0].Value) } - stream.WriteHeaders([]Header{ + _ = stream.WriteHeaders([]Header{ {Name: "response-header", Value: "responseValue"}, }) payload := make([]byte, bodySize) @@ -302,7 +303,6 @@ func TestSingleStreamLargeResponseBody(t *testing.T) { } func TestMultipleStreams(t *testing.T) { - l := logger.NewOutputWriter(logger.NewMockWriteManager()) f := MuxedStreamFunc(func(stream *MuxedStream) error { if len(stream.Headers) != 1 { t.Fatalf("expected %d headers, got %d", 1, len(stream.Headers)) @@ -310,13 +310,13 @@ func TestMultipleStreams(t *testing.T) { if stream.Headers[0].Name != "client-token" { t.Fatalf("expected header name %s, got %s", "client-token", stream.Headers[0].Name) } - l.Debugf("Got request for stream %s", 
stream.Headers[0].Value) - stream.WriteHeaders([]Header{ + log.Debug().Msgf("Got request for stream %s", stream.Headers[0].Value) + _ = stream.WriteHeaders([]Header{ {Name: "response-token", Value: stream.Headers[0].Value}, }) - l.Debugf("Wrote headers for stream %s", stream.Headers[0].Value) - stream.Write([]byte("OK")) - l.Debugf("Wrote body for stream %s", stream.Headers[0].Value) + log.Debug().Msgf("Wrote headers for stream %s", stream.Headers[0].Value) + _, _ = stream.Write([]byte("OK")) + log.Debug().Msgf("Wrote body for stream %s", stream.Headers[0].Value) return nil }) muxPair := NewDefaultMuxerPair(t, t.Name(), f) @@ -334,7 +334,7 @@ func TestMultipleStreams(t *testing.T) { []Header{{Name: "client-token", Value: tokenString}}, nil, ) - l.Debugf("Got headers for stream %d", tokenId) + log.Debug().Msgf("Got headers for stream %d", tokenId) if err != nil { errorsC <- err return @@ -372,7 +372,7 @@ func TestMultipleStreams(t *testing.T) { testFail := false for err := range errorsC { testFail = true - l.Errorf("%s", err) + log.Error().Msgf("%s", err) } if testFail { t.Fatalf("TestMultipleStreams failed") @@ -396,7 +396,7 @@ func TestMultipleStreamsFlowControl(t *testing.T) { if stream.Headers[0].Value != "headerValue" { t.Fatalf("expected header value %s, got %s", "headerValue", stream.Headers[0].Value) } - stream.WriteHeaders([]Header{ + _ = stream.WriteHeaders([]Header{ {Name: "response-header", Value: "responseValue"}, }) payload := make([]byte, responseSizes[(stream.streamID-2)/2]) @@ -450,27 +450,25 @@ func TestMultipleStreamsFlowControl(t *testing.T) { } func TestGracefulShutdown(t *testing.T) { - l := logger.NewOutputWriter(logger.NewMockWriteManager()) - sendC := make(chan struct{}) responseBuf := bytes.Repeat([]byte("Hello world"), 65536) f := MuxedStreamFunc(func(stream *MuxedStream) error { - stream.WriteHeaders([]Header{ + _ = stream.WriteHeaders([]Header{ {Name: "response-header", Value: "responseValue"}, }) <-sendC - l.Debugf("Writing %d bytes", 
len(responseBuf)) - stream.Write(responseBuf) - stream.CloseWrite() - l.Debugf("Wrote %d bytes", len(responseBuf)) + log.Debug().Msgf("Writing %d bytes", len(responseBuf)) + _, _ = stream.Write(responseBuf) + _ = stream.CloseWrite() + log.Debug().Msgf("Wrote %d bytes", len(responseBuf)) // Reading from the stream will block until the edge closes its end of the stream. // Otherwise, we'll close the whole connection before receiving the 'stream closed' // message from the edge. // Graceful shutdown works if you omit this, it just gives spurious errors for now - // TODO ignore errors when writing 'stream closed' and we're shutting down. - stream.Read([]byte{0}) - l.Debugf("Handler ends") + _, _ = stream.Read([]byte{0}) + log.Debug().Msgf("Handler ends") return nil }) muxPair := NewDefaultMuxerPair(t, t.Name(), f) @@ -487,7 +485,7 @@ func TestGracefulShutdown(t *testing.T) { muxPair.EdgeMux.Shutdown() close(sendC) responseBody := make([]byte, len(responseBuf)) - l.Debugf("Waiting for %d bytes", len(responseBuf)) + log.Debug().Msgf("Waiting for %d bytes", len(responseBuf)) n, err := io.ReadFull(stream, responseBody) if err != nil { t.Fatalf("error from (*MuxedStream).Read with %d bytes read: %s", n, err) @@ -498,7 +496,7 @@ func TestGracefulShutdown(t *testing.T) { if !bytes.Equal(responseBuf, responseBody) { t.Fatalf("response body mismatch") } - stream.Close() + _ = stream.Close() muxPair.Wait(t) } @@ -509,7 +507,7 @@ func TestUnexpectedShutdown(t *testing.T) { f := MuxedStreamFunc(func(stream *MuxedStream) error { defer close(handlerFinishC) - stream.WriteHeaders([]Header{ + _ = stream.WriteHeaders([]Header{ {Name: "response-header", Value: "responseValue"}, }) <-sendC @@ -536,7 +534,7 @@ func TestUnexpectedShutdown(t *testing.T) { nil, ) // Close the underlying connection before telling the origin to write. 
- muxPair.EdgeConn.Close() + _ = muxPair.EdgeConn.Close() close(sendC) if err != nil { t.Fatalf("error in OpenStream: %s", err) @@ -559,18 +557,18 @@ func TestUnexpectedShutdown(t *testing.T) { func EchoHandler(stream *MuxedStream) error { var buf bytes.Buffer - fmt.Fprintf(&buf, "Hello, world!\n\n# REQUEST HEADERS:\n\n") + _, _ = fmt.Fprintf(&buf, "Hello, world!\n\n# REQUEST HEADERS:\n\n") for _, header := range stream.Headers { - fmt.Fprintf(&buf, "[%s] = %s\n", header.Name, header.Value) + _, _ = fmt.Fprintf(&buf, "[%s] = %s\n", header.Name, header.Value) } - stream.WriteHeaders([]Header{ + _ = stream.WriteHeaders([]Header{ {Name: ":status", Value: "200"}, {Name: "server", Value: "Echo-server/1.0"}, {Name: "date", Value: time.Now().Format(time.RFC850)}, {Name: "content-type", Value: "text/html; charset=utf-8"}, {Name: "content-length", Value: strconv.Itoa(buf.Len())}, }) - buf.WriteTo(stream) + _, _ = buf.WriteTo(stream) return nil } @@ -582,14 +580,14 @@ func TestOpenAfterDisconnect(t *testing.T) { switch i { case 0: // Close both directions of the connection to cause EOF on both peers. - muxPair.OriginConn.Close() - muxPair.EdgeConn.Close() + _ = muxPair.OriginConn.Close() + _ = muxPair.EdgeConn.Close() case 1: // Close origin conn to cause EOF on origin first. - muxPair.OriginConn.Close() + _ = muxPair.OriginConn.Close() case 2: // Close edge conn to cause EOF on edge first. 
- muxPair.EdgeConn.Close() + _ = muxPair.EdgeConn.Close() } _, err := muxPair.OpenEdgeMuxStream( @@ -617,7 +615,7 @@ func TestHPACK(t *testing.T) { if err != nil { t.Fatalf("error in OpenStream: %s", err) } - stream.Close() + _ = stream.Close() for i := 0; i < 3; i++ { stream, err := muxPair.OpenEdgeMuxStream( @@ -654,8 +652,8 @@ func TestHPACK(t *testing.T) { if stream.Headers[0].Value != "200" { t.Fatalf("expected status 200, got %s", stream.Headers[0].Value) } - ioutil.ReadAll(stream) - stream.Close() + _, _ = ioutil.ReadAll(stream) + _ = stream.Close() } } @@ -680,7 +678,7 @@ func AssertIfPipeReadable(t *testing.T, pipe io.ReadCloser) { } func TestMultipleStreamsWithDictionaries(t *testing.T) { - l := logger.NewOutputWriter(logger.NewMockWriteManager()) + l := zerolog.Nop() for q := CompressionNone; q <= CompressionMax; q++ { htmlBody := ` ` -func StartHelloWorldServer(logger logger.Service, listener net.Listener, shutdownC <-chan struct{}) error { - logger.Infof("Starting Hello World server at %s", listener.Addr()) +func StartHelloWorldServer(log *zerolog.Logger, listener net.Listener, shutdownC <-chan struct{}) error { + log.Info().Msgf("Starting Hello World server at %s", listener.Addr()) serverName := defaultServerName if hostname, err := os.Hostname(); err == nil { serverName = hostname @@ -113,14 +113,14 @@ func StartHelloWorldServer(logger logger.Service, listener net.Listener, shutdow muxer := http.NewServeMux() muxer.HandleFunc(UptimeRoute, uptimeHandler(time.Now())) - muxer.HandleFunc(WSRoute, websocketHandler(logger, upgrader)) - muxer.HandleFunc(SSERoute, sseHandler(logger)) + muxer.HandleFunc(WSRoute, websocketHandler(log, upgrader)) + muxer.HandleFunc(SSERoute, sseHandler(log)) muxer.HandleFunc(HealthRoute, healthHandler()) muxer.HandleFunc("/", rootHandler(serverName)) httpServer := &http.Server{Addr: listener.Addr().String(), Handler: muxer} go func() { <-shutdownC - httpServer.Close() + _ = httpServer.Close() }() err := 
httpServer.Serve(listener) @@ -152,13 +152,13 @@ func uptimeHandler(startTime time.Time) http.HandlerFunc { w.WriteHeader(http.StatusInternalServerError) } else { w.Header().Set("Content-Type", "application/json") - w.Write(respJson) + _, _ = w.Write(respJson) } } } // This handler will echo message -func websocketHandler(logger logger.Service, upgrader websocket.Upgrader) http.HandlerFunc { +func websocketHandler(log *zerolog.Logger, upgrader websocket.Upgrader) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { // This addresses the issue of r.Host includes port but origin header doesn't host, _, err := net.SplitHostPort(r.Host) @@ -168,32 +168,32 @@ func websocketHandler(logger logger.Service, upgrader websocket.Upgrader) http.H conn, err := upgrader.Upgrade(w, r, nil) if err != nil { - logger.Errorf("failed to upgrade to websocket connection, error: %s", err) + log.Error().Msgf("failed to upgrade to websocket connection, error: %s", err) return } defer conn.Close() for { mt, message, err := conn.ReadMessage() if err != nil { - logger.Errorf("websocket read message error: %s", err) + log.Error().Msgf("websocket read message error: %s", err) break } if err := conn.WriteMessage(mt, message); err != nil { - logger.Errorf("websocket write message error: %s", err) + log.Error().Msgf("websocket write message error: %s", err) break } } } } -func sseHandler(logger logger.Service) http.HandlerFunc { +func sseHandler(log *zerolog.Logger) http.HandlerFunc { return func(w http.ResponseWriter, r *http.Request) { w.Header().Set("Content-Type", "text/event-stream; charset=utf-8") flusher, ok := w.(http.Flusher) if !ok { w.WriteHeader(http.StatusInternalServerError) - logger.Errorf("Can't support SSE. ResponseWriter %T doesn't implement http.Flusher interface", w) + log.Error().Msgf("Can't support SSE. 
ResponseWriter %T doesn't implement http.Flusher interface", w) return } @@ -204,7 +204,7 @@ func sseHandler(logger logger.Service) http.HandlerFunc { freq = parsedFreq } } - logger.Infof("Server Sent Events every %s", freq) + log.Info().Msgf("Server Sent Events every %s", freq) ticker := time.NewTicker(freq) counter := 0 for { @@ -247,9 +247,9 @@ func rootHandler(serverName string) http.HandlerFunc { }) if err != nil { w.WriteHeader(http.StatusInternalServerError) - fmt.Fprintf(w, "error: %v", err) + _, _ = fmt.Fprintf(w, "error: %v", err) } else { - buffer.WriteTo(w) + _, _ = buffer.WriteTo(w) } } } diff --git a/ingress/ingress.go b/ingress/ingress.go index 25db8f42..dcf70ae1 100644 --- a/ingress/ingress.go +++ b/ingress/ingress.go @@ -9,11 +9,11 @@ import ( "strings" "sync" - "github.com/pkg/errors" - "github.com/urfave/cli/v2" - "github.com/cloudflare/cloudflared/cmd/cloudflared/config" - "github.com/cloudflare/cloudflared/logger" + + "github.com/pkg/errors" + "github.com/rs/zerolog" + "github.com/urfave/cli/v2" ) var ( @@ -63,7 +63,7 @@ type Ingress struct { // NewSingleOrigin constructs an Ingress set with only one rule, constructed from // legacy CLI parameters like --url or --no-chunked-encoding. -func NewSingleOrigin(c *cli.Context, allowURLFromArgs bool, logger logger.Service) (Ingress, error) { +func NewSingleOrigin(c *cli.Context, allowURLFromArgs bool) (Ingress, error) { service, err := parseSingleOriginService(c, allowURLFromArgs) if err != nil { @@ -113,10 +113,10 @@ func (ing Ingress) IsEmpty() bool { } // StartOrigins will start any origin services managed by cloudflared, e.g. proxy servers or Hello World. 
-func (ing Ingress) StartOrigins(wg *sync.WaitGroup, log logger.Service, shutdownC <-chan struct{}, errC chan error) { +func (ing Ingress) StartOrigins(wg *sync.WaitGroup, log *zerolog.Logger, shutdownC <-chan struct{}, errC chan error) { for _, rule := range ing.Rules { if err := rule.Service.start(wg, log, shutdownC, errC, rule.Config); err != nil { - log.Errorf("Error starting local service %s: %s", rule.Service, err) + log.Error().Msgf("Error starting local service %s: %s", rule.Service, err) } } } diff --git a/ingress/ingress_test.go b/ingress/ingress_test.go index d83e10be..411f22b4 100644 --- a/ingress/ingress_test.go +++ b/ingress/ingress_test.go @@ -14,7 +14,6 @@ import ( "gopkg.in/yaml.v2" "github.com/cloudflare/cloudflared/cmd/cloudflared/config" - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/tlsconfig" ) @@ -329,9 +328,8 @@ func TestSingleOriginSetsConfig(t *testing.T) { require.NoError(t, err) allowURLFromArgs := false - logger, err := logger.New() require.NoError(t, err) - ingress, err := NewSingleOrigin(cliCtx, allowURLFromArgs, logger) + ingress, err := NewSingleOrigin(cliCtx, allowURLFromArgs) require.NoError(t, err) assert.Equal(t, time.Second, ingress.Rules[0].Config.ConnectTimeout) diff --git a/ingress/origin_service.go b/ingress/origin_service.go index e8525672..ba3beb7d 100644 --- a/ingress/origin_service.go +++ b/ingress/origin_service.go @@ -13,12 +13,12 @@ import ( "time" "github.com/cloudflare/cloudflared/hello" - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/socks" "github.com/cloudflare/cloudflared/tlsconfig" "github.com/cloudflare/cloudflared/websocket" gws "github.com/gorilla/websocket" "github.com/pkg/errors" + "github.com/rs/zerolog" ) // OriginService is something a tunnel can proxy traffic to. @@ -29,7 +29,7 @@ type OriginService interface { // Start the origin service if it's managed by cloudflared, e.g. proxy servers or Hello World. 
// If it's not managed by cloudflared, this is a no-op because the user is responsible for // starting the origin service. - start(wg *sync.WaitGroup, log logger.Service, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error + start(wg *sync.WaitGroup, log *zerolog.Logger, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error } // unixSocketPath is an OriginService representing a unix socket (which accepts HTTP) @@ -42,7 +42,7 @@ func (o *unixSocketPath) String() string { return "unix socket: " + o.path } -func (o *unixSocketPath) start(wg *sync.WaitGroup, log logger.Service, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error { +func (o *unixSocketPath) start(wg *sync.WaitGroup, log *zerolog.Logger, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error { transport, err := newHTTPTransport(o, cfg, log) if err != nil { return err @@ -84,7 +84,7 @@ func (o *localService) Dial(reqURL *url.URL, headers http.Header) (*gws.Conn, *h return d.Dial(reqURL.String(), headers) } -func (o *localService) start(wg *sync.WaitGroup, log logger.Service, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error { +func (o *localService) start(wg *sync.WaitGroup, log *zerolog.Logger, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error { transport, err := newHTTPTransport(o, cfg, log) if err != nil { return err @@ -101,13 +101,13 @@ func (o *localService) start(wg *sync.WaitGroup, log logger.Service, shutdownC < return nil } -func (o *localService) startProxy(staticHost string, wg *sync.WaitGroup, log logger.Service, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error { +func (o *localService) startProxy(staticHost string, wg *sync.WaitGroup, log *zerolog.Logger, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error { // Start a listener for the proxy proxyAddress := net.JoinHostPort(cfg.ProxyAddress, 
strconv.Itoa(int(cfg.ProxyPort))) listener, err := net.Listen("tcp", proxyAddress) if err != nil { - log.Errorf("Cannot start Websocket Proxy Server: %s", err) + log.Error().Msgf("Cannot start Websocket Proxy Server: %s", err) return errors.Wrap(err, "Cannot start Websocket Proxy Server") } @@ -119,18 +119,18 @@ func (o *localService) startProxy(staticHost string, wg *sync.WaitGroup, log log // This origin's config specifies what type of proxy to start. switch cfg.ProxyType { case socksProxy: - log.Info("SOCKS5 server started") + log.Info().Msg("SOCKS5 server started") streamHandler = func(wsConn *websocket.Conn, remoteConn net.Conn, _ http.Header) { dialer := socks.NewConnDialer(remoteConn) requestHandler := socks.NewRequestHandler(dialer) socksServer := socks.NewConnectionHandler(requestHandler) - socksServer.Serve(wsConn) + _ = socksServer.Serve(wsConn) } case "": - log.Debug("Not starting any websocket proxy") + log.Debug().Msg("Not starting any websocket proxy") default: - log.Errorf("%s isn't a valid proxy (valid options are {%s})", cfg.ProxyType, socksProxy) + log.Error().Msgf("%s isn't a valid proxy (valid options are {%s})", cfg.ProxyType, socksProxy) } errC <- websocket.StartProxyServer(log, listener, staticHost, shutdownC, streamHandler) @@ -203,7 +203,13 @@ func (o *helloWorld) String() string { } // Start starts a HelloWorld server and stores its address in the Service receiver. 
-func (o *helloWorld) start(wg *sync.WaitGroup, log logger.Service, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error { +func (o *helloWorld) start( + wg *sync.WaitGroup, + log *zerolog.Logger, + shutdownC <-chan struct{}, + errC chan error, + cfg OriginRequestConfig, +) error { transport, err := newHTTPTransport(o, cfg, log) if err != nil { return err @@ -261,7 +267,13 @@ func (o *statusCode) String() string { return fmt.Sprintf("HTTP %d", o.resp.StatusCode) } -func (o *statusCode) start(wg *sync.WaitGroup, log logger.Service, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error { +func (o *statusCode) start( + wg *sync.WaitGroup, + log *zerolog.Logger, + shutdownC <-chan struct{}, + errC chan error, + cfg OriginRequestConfig, +) error { return nil } @@ -280,7 +292,7 @@ func (nrc *NopReadCloser) Close() error { return nil } -func newHTTPTransport(service OriginService, cfg OriginRequestConfig, log logger.Service) (*http.Transport, error) { +func newHTTPTransport(service OriginService, cfg OriginRequestConfig, log *zerolog.Logger) (*http.Transport, error) { originCertPool, err := tlsconfig.LoadOriginCA(cfg.CAPool, log) if err != nil { return nil, errors.Wrap(err, "Error loading cert pool") @@ -338,6 +350,6 @@ func (mos MockOriginService) String() string { return "MockOriginService" } -func (mos MockOriginService) start(wg *sync.WaitGroup, log logger.Service, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error { +func (mos MockOriginService) start(wg *sync.WaitGroup, log *zerolog.Logger, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error { return nil } diff --git a/logger/configuration.go b/logger/configuration.go index 0b38e4fe..ed969ec7 100644 --- a/logger/configuration.go +++ b/logger/configuration.go @@ -29,7 +29,7 @@ type RollingConfig struct { } func createDefaultConfig() Config { - const minLevel = "fatal" + const minLevel = "info" const RollingMaxSize = 1 // Mb 
const RollingMaxBackups = 5 // files @@ -57,7 +57,7 @@ func createDefaultConfig() Config { func CreateConfig( minLevel string, disableTerminal bool, - rollingLogPath, nonRollingLogFilePath string, + rollingLogPath, rollingLogFilename, nonRollingLogFilePath string, ) *Config { var console *ConsoleConfig if !disableTerminal { @@ -71,7 +71,7 @@ func CreateConfig( var rolling *RollingConfig if rollingLogPath != "" { - rolling = createRollingConfig(rollingLogPath) + rolling = createRollingConfig(rollingLogPath, rollingLogFilename) } if minLevel == "" { @@ -103,14 +103,14 @@ func createFileConfig(filepath string) *FileConfig { } } -func createRollingConfig(directory string) *RollingConfig { +func createRollingConfig(directory, filename string) *RollingConfig { if directory == "" { directory = defaultConfig.RollingConfig.Directory } return &RollingConfig{ Directory: directory, - Filename: defaultConfig.RollingConfig.Filename, + Filename: filename, maxSize: defaultConfig.RollingConfig.maxSize, maxBackups: defaultConfig.RollingConfig.maxBackups, maxAge: defaultConfig.RollingConfig.maxAge, diff --git a/logger/create.go b/logger/create.go index 03cf3c65..70f888e2 100644 --- a/logger/create.go +++ b/logger/create.go @@ -1,13 +1,11 @@ package logger import ( - "fmt" + "io" "os" - "path/filepath" - "strings" - "time" - "github.com/alecthomas/units" + "github.com/rs/zerolog" + fallbacklog "github.com/rs/zerolog/log" "github.com/urfave/cli/v2" ) @@ -24,162 +22,40 @@ const ( LogSSHLevelFlag = "log-level" ) -// Option is to encaspulate actions that will be called by Parse and run later to build an Options struct -type Option func(*Options) error +func newZerolog(loggerConfig *Config) *zerolog.Logger { + var writers []io.Writer -// Options is use to set logging configuration data -type Options struct { - logFileDirectory string - maxFileSize units.Base2Bytes - maxFileCount uint - terminalOutputDisabled bool - supportedFileLevels []Level - supportedTerminalLevels []Level -} - -// 
DisableTerminal stops terminal output for the logger -func DisableTerminal(disable bool) Option { - return func(c *Options) error { - c.terminalOutputDisabled = disable - return nil + if loggerConfig.ConsoleConfig != nil { + writers = append(writers, zerolog.ConsoleWriter{ + Out: os.Stderr, + NoColor: loggerConfig.ConsoleConfig.noColor, + }) } -} -// File sets a custom file to log events -func File(path string, size units.Base2Bytes, count uint) Option { - return func(c *Options) error { - c.logFileDirectory = path - c.maxFileSize = size - c.maxFileCount = count - return nil - } -} + // TODO TUN-3472: Support file writer and log rotation -// DefaultFile configures the log options will the defaults -func DefaultFile(directoryPath string) Option { - return func(c *Options) error { - size, err := units.ParseBase2Bytes("1MB") - if err != nil { - return err - } - - c.logFileDirectory = directoryPath - c.maxFileSize = size - c.maxFileCount = 5 - return nil - } -} - -// SupportedFileLevels sets the supported logging levels for the log file -func SupportedFileLevels(supported []Level) Option { - return func(c *Options) error { - c.supportedFileLevels = supported - return nil - } -} - -// SupportedTerminalevels sets the supported logging levels for the terminal output -func SupportedTerminalevels(supported []Level) Option { - return func(c *Options) error { - c.supportedTerminalLevels = supported - return nil - } -} - -// LogLevelString sets the supported logging levels from a command line flag -func LogLevelString(level string) Option { - return func(c *Options) error { - supported, err := ParseLevelString(level) - if err != nil { - return err - } - c.supportedFileLevels = supported - c.supportedTerminalLevels = supported - return nil - } -} - -// Parse builds the Options struct so the caller knows what actions should be run -func Parse(opts ...Option) (*Options, error) { - options := &Options{} - for _, opt := range opts { - if err := opt(options); err != nil { - return 
nil, err - } - } - return options, nil -} - -// New setups a new logger based on the options. -// The default behavior is to write to standard out -func New(opts ...Option) (*OutputWriter, error) { - options, err := Parse(opts...) + multi := zerolog.MultiLevelWriter(writers...) + level, err := zerolog.ParseLevel(loggerConfig.MinLevel) if err != nil { - return nil, err + failLog := fallbacklog.With().Logger() + fallbacklog.Error().Msgf("Falling back to a default logger due to logger setup failure: %s", err) + return &failLog } + log := zerolog.New(multi).With().Timestamp().Logger().Level(level) - l := NewOutputWriter(SharedWriteManager) - if options.logFileDirectory != "" { - l.Add(NewFileRollingWriter(SanitizeLogPath(options.logFileDirectory), - "cloudflared", - int64(options.maxFileSize), - options.maxFileCount), - NewDefaultFormatter(time.RFC3339Nano), options.supportedFileLevels...) - } - - if !options.terminalOutputDisabled { - terminalFormatter := NewTerminalFormatter(time.RFC3339) - - if len(options.supportedTerminalLevels) == 0 { - l.Add(os.Stderr, terminalFormatter, InfoLevel, ErrorLevel, FatalLevel) - } else { - l.Add(os.Stderr, terminalFormatter, options.supportedTerminalLevels...) - } - } - - return l, nil + return &log } -func NewInHouse(loggerConfig *Config) (*OutputWriter, error) { - var loggerOpts []Option - - var logPath string - if loggerConfig.FileConfig != nil { - logPath = loggerConfig.FileConfig.Filepath - } - if logPath == "" && loggerConfig.RollingConfig != nil { - logPath = loggerConfig.RollingConfig.Directory - } - - if logPath != "" { - loggerOpts = append(loggerOpts, DefaultFile(logPath)) - } - - loggerOpts = append(loggerOpts, LogLevelString(loggerConfig.MinLevel)) - - if loggerConfig.ConsoleConfig == nil { - disableOption := DisableTerminal(true) - loggerOpts = append(loggerOpts, disableOption) - } - - l, err := New(loggerOpts...) 
- if err != nil { - return nil, err - } - - return l, nil -} - -func CreateTransportLoggerFromContext(c *cli.Context, disableTerminal bool) (*OutputWriter, error) { +func CreateTransportLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger { return createFromContext(c, LogTransportLevelFlag, LogDirectoryFlag, disableTerminal) } -func CreateLoggerFromContext(c *cli.Context, disableTerminal bool) (*OutputWriter, error) { +func CreateLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger { return createFromContext(c, LogLevelFlag, LogDirectoryFlag, disableTerminal) } -func CreateSSHLoggerFromContext(c *cli.Context, disableTerminal bool) (*OutputWriter, error) { +func CreateSSHLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger { return createFromContext(c, LogSSHLevelFlag, LogSSHDirectoryFlag, disableTerminal) } @@ -188,37 +64,26 @@ func createFromContext( logLevelFlagName, logDirectoryFlagName string, disableTerminal bool, -) (*OutputWriter, error) { +) *zerolog.Logger { logLevel := c.String(logLevelFlagName) logFile := c.String(LogFileFlag) logDirectory := c.String(logDirectoryFlagName) - loggerConfig := CreateConfig(logLevel, disableTerminal, logDirectory, logFile) + loggerConfig := CreateConfig( + logLevel, + disableTerminal, + logDirectory, + defaultConfig.RollingConfig.Filename, + logFile, + ) - return NewInHouse(loggerConfig) + return newZerolog(loggerConfig) } -// ParseLevelString returns the expected log levels based on the cmd flag -func ParseLevelString(lvl string) ([]Level, error) { - switch strings.ToLower(lvl) { - case "fatal": - return []Level{FatalLevel}, nil - case "error": - return []Level{FatalLevel, ErrorLevel}, nil - case "info", "warn": - return []Level{FatalLevel, ErrorLevel, InfoLevel}, nil - case "debug": - return []Level{FatalLevel, ErrorLevel, InfoLevel, DebugLevel}, nil +func Create(loggerConfig *Config) *zerolog.Logger { + if loggerConfig == nil { + loggerConfig = &defaultConfig } - 
return []Level{}, fmt.Errorf("not a valid log level: %q", lvl) -} -// SanitizeLogPath checks that the logger log path -func SanitizeLogPath(path string) string { - newPath := strings.TrimSpace(path) - // make sure it has a log file extension and is not a directory - if filepath.Ext(newPath) != ".log" && !(isDirectory(newPath) || strings.HasSuffix(newPath, "/")) { - newPath = newPath + ".log" - } - return newPath + return newZerolog(loggerConfig) } diff --git a/logger/create_test.go b/logger/create_test.go deleted file mode 100644 index a0617351..00000000 --- a/logger/create_test.go +++ /dev/null @@ -1,46 +0,0 @@ -package logger - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestLogLevelParse(t *testing.T) { - lvls, err := ParseLevelString("fatal") - assert.NoError(t, err) - assert.Equal(t, []Level{FatalLevel}, lvls) - - lvls, err = ParseLevelString("error") - assert.NoError(t, err) - assert.Equal(t, []Level{FatalLevel, ErrorLevel}, lvls) - - lvls, err = ParseLevelString("info") - assert.NoError(t, err) - assert.Equal(t, []Level{FatalLevel, ErrorLevel, InfoLevel}, lvls) - - lvls, err = ParseLevelString("info") - assert.NoError(t, err) - assert.Equal(t, []Level{FatalLevel, ErrorLevel, InfoLevel}, lvls) - - lvls, err = ParseLevelString("warn") - assert.NoError(t, err) - assert.Equal(t, []Level{FatalLevel, ErrorLevel, InfoLevel}, lvls) - - lvls, err = ParseLevelString("debug") - assert.NoError(t, err) - assert.Equal(t, []Level{FatalLevel, ErrorLevel, InfoLevel, DebugLevel}, lvls) - - _, err = ParseLevelString("blah") - assert.Error(t, err) - - _, err = ParseLevelString("") - assert.Error(t, err) -} - -func TestPathSanitizer(t *testing.T) { - assert.Equal(t, "somebad/path/log.bat.log", SanitizeLogPath("\t somebad/path/log.bat\n\n")) - assert.Equal(t, "proper/path/cloudflared.log", SanitizeLogPath("proper/path/cloudflared.log")) - assert.Equal(t, "proper/path/", SanitizeLogPath("proper/path/")) - assert.Equal(t, "proper/path/cloudflared.log", 
SanitizeLogPath("\tproper/path/cloudflared\n\n")) -} diff --git a/logger/file_writer.go b/logger/file_writer.go deleted file mode 100644 index a1736ff1..00000000 --- a/logger/file_writer.go +++ /dev/null @@ -1,125 +0,0 @@ -package logger - -import ( - "fmt" - "os" - "path/filepath" -) - -// FileRollingWriter maintains a set of log files numbered in order -// to keep a subset of log data to ensure it doesn't grow pass defined limits -type FileRollingWriter struct { - baseFileName string - directory string - maxFileSize int64 - maxFileCount uint - fileHandle *os.File -} - -// NewFileRollingWriter creates a new rolling file writer. -// directory is the working directory for the files -// baseFileName is the log file name. This writer appends .log to the name for the file name -// maxFileSize is the size in bytes of how large each file can be. Not a hard limit, general limit based after each write -// maxFileCount is the number of rolled files to keep. -func NewFileRollingWriter(directory, baseFileName string, maxFileSize int64, maxFileCount uint) *FileRollingWriter { - return &FileRollingWriter{ - directory: directory, - baseFileName: baseFileName, - maxFileSize: maxFileSize, - maxFileCount: maxFileCount, - } -} - -// Write is an implementation of io.writer the rolls the file once it reaches its max size -// It is expected the caller to Write is doing so in a thread safe manner (as WriteManager does). -func (w *FileRollingWriter) Write(p []byte) (n int, err error) { - logFile, isSingleFile := buildPath(w.directory, w.baseFileName) - if w.fileHandle == nil { - h, err := os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0664) - if err != nil { - return 0, err - } - w.fileHandle = h - } - - // get size for rolling check - info, err := w.fileHandle.Stat() - if err != nil { - // failed to stat the file. 
Close the file handle and attempt to open a new handle on the next write - w.Close() - w.fileHandle = nil - return 0, err - } - - // write to the file - written, err := w.fileHandle.Write(p) - - // check if the file needs to be rolled - if err == nil && info.Size()+int64(written) > w.maxFileSize && !isSingleFile { - // close the file handle than do the renaming. A new one will be opened on the next write - w.Close() - w.rename(logFile, 1) - } - - return written, err -} - -// Close closes the file handle if it is open -func (w *FileRollingWriter) Close() { - if w.fileHandle != nil { - w.fileHandle.Close() - w.fileHandle = nil - } -} - -// rename is how the files are rolled. It works recursively to move the base log file to the rolled ones -// e.g. cloudflared.log -> cloudflared-1.log, -// but if cloudflared-1.log already exists, it is renamed to cloudflared-2.log, -// then the other files move in to their postion -func (w *FileRollingWriter) rename(sourcePath string, index uint) { - destinationPath, isSingleFile := buildPath(w.directory, fmt.Sprintf("%s-%d", w.baseFileName, index)) - if isSingleFile { - return //don't need to rename anything, it is a single file - } - - // rolled to the max amount of files allowed on disk - if index >= w.maxFileCount { - os.Remove(destinationPath) - } - - // if the rolled path already exist, rename it to cloudflared-2.log, then do this one. - // recursive call since the oldest one needs to be renamed, before the newer ones can be moved - if exists(destinationPath) { - w.rename(destinationPath, index+1) - } - - os.Rename(sourcePath, destinationPath) -} - -// return the path to the log file and if it is a single file or not. -// true means a single file. 
false means a rolled file -func buildPath(directory, fileName string) (string, bool) { - if !isDirectory(directory) { // not a directory, so try and treat it as a single file for backwards compatibility sake - return directory, true - } - return filepath.Join(directory, fileName+".log"), false -} - -func exists(filePath string) bool { - if _, err := os.Stat(filePath); os.IsNotExist(err) { - return false - } - return true -} - -func isDirectory(path string) bool { - if path == "" { - return true - } - - fileInfo, err := os.Stat(path) - if err != nil { - return false - } - return fileInfo.IsDir() -} diff --git a/logger/file_writer_test.go b/logger/file_writer_test.go deleted file mode 100644 index 3c7a66a1..00000000 --- a/logger/file_writer_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package logger - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestFileWrite(t *testing.T) { - fileName := "test_file" - fileLog := fileName + ".log" - testData := []byte(string("hello Dalton, how are you doing?")) - defer func() { - os.Remove(fileLog) - }() - - w := NewFileRollingWriter("", fileName, 1000, 2) - defer w.Close() - - l, err := w.Write(testData) - - assert.NoError(t, err) - assert.Equal(t, l, len(testData), "expected write length and data length to match") - - d, err := ioutil.ReadFile(fileLog) - assert.FileExists(t, fileLog, "file doesn't exist at expected path") - assert.Equal(t, d, testData, "expected data in file to match test data") -} - -func TestRolling(t *testing.T) { - dirName := "testdir" - err := os.Mkdir(dirName, 0755) - assert.NoError(t, err) - - fileName := "test_file" - firstFile := filepath.Join(dirName, fileName+".log") - secondFile := filepath.Join(dirName, fileName+"-1.log") - thirdFile := filepath.Join(dirName, fileName+"-2.log") - - defer func() { - os.RemoveAll(dirName) - os.Remove(firstFile) - os.Remove(secondFile) - os.Remove(thirdFile) - }() - - w := 
NewFileRollingWriter(dirName, fileName, 1000, 2) - defer w.Close() - - for i := 99; i >= 1; i-- { - testData := []byte(fmt.Sprintf("%d bottles of beer on the wall...", i)) - w.Write(testData) - } - assert.FileExists(t, firstFile, "first file doesn't exist as expected") - assert.FileExists(t, secondFile, "second file doesn't exist as expected") - assert.FileExists(t, thirdFile, "third file doesn't exist as expected") - assert.False(t, exists(filepath.Join(dirName, fileName+"-3.log")), "limited to two files and there is more") -} - -func TestSingleFile(t *testing.T) { - fileName := "test_file" - testData := []byte(string("hello Dalton, how are you doing?")) - defer func() { - os.Remove(fileName) - }() - - w := NewFileRollingWriter(fileName, fileName, 1000, 2) - defer w.Close() - - l, err := w.Write(testData) - - assert.NoError(t, err) - assert.Equal(t, l, len(testData), "expected write length and data length to match") - - d, err := ioutil.ReadFile(fileName) - assert.FileExists(t, fileName, "file doesn't exist at expected path") - assert.Equal(t, d, testData, "expected data in file to match test data") -} - -func TestSingleFileInDirectory(t *testing.T) { - dirName := "testdir" - err := os.Mkdir(dirName, 0755) - assert.NoError(t, err) - - fileName := "test_file" - fullPath := filepath.Join(dirName, fileName+".log") - testData := []byte(string("hello Dalton, how are you doing?")) - defer func() { - os.Remove(fullPath) - os.RemoveAll(dirName) - }() - - w := NewFileRollingWriter(fullPath, fileName, 1000, 2) - defer w.Close() - - l, err := w.Write(testData) - - assert.NoError(t, err) - assert.Equal(t, l, len(testData), "expected write length and data length to match") - - d, err := ioutil.ReadFile(fullPath) - assert.FileExists(t, fullPath, "file doesn't exist at expected path") - assert.Equal(t, d, testData, "expected data in file to match test data") -} diff --git a/logger/formatter.go b/logger/formatter.go deleted file mode 100644 index 0ed0c3fa..00000000 --- 
a/logger/formatter.go +++ /dev/null @@ -1,138 +0,0 @@ -package logger - -import ( - "fmt" - "runtime" - "time" - - "github.com/acmacalister/skittles" -) - -// Level of logging, lower number means more verbose logging, higher more terse -type Level int - -const ( - // DebugLevel is for messages that are intended for purposes debugging only - DebugLevel Level = iota - - // InfoLevel is for standard log messages - InfoLevel - - // ErrorLevel is for error message to indicate something has gone wrong - ErrorLevel - - // FatalLevel is for error message that log and kill the program with an os.exit(1) - FatalLevel -) - -// Formatter is the base interface for formatting logging messages before writing them out -type Formatter interface { - Timestamp(Level, time.Time) string // format the timestamp string - Content(Level, string) string // format content string (color for terminal, etc) -} - -// DefaultFormatter writes a simple structure timestamp and the message per log line -type DefaultFormatter struct { - format string -} - -// NewDefaultFormatter creates the standard log formatter -// format is the time format to use for timestamp formatting -func NewDefaultFormatter(format string) Formatter { - return &DefaultFormatter{ - format: format, - } -} - -// Timestamp formats a log line timestamp with a brackets around them -func (f *DefaultFormatter) Timestamp(l Level, d time.Time) string { - if f.format == "" { - return "" - } - return fmt.Sprintf("[%s]: ", d.Format(f.format)) -} - -// Content just writes the log line straight to the sources -func (f *DefaultFormatter) Content(l Level, c string) string { - return c -} - -// TerminalFormatter is setup for colored output -type TerminalFormatter struct { - format string - supportsColor bool -} - -// UIFormatter is used for streaming logs to UI -type UIFormatter struct { - format string - supportsColor bool -} - -// NewTerminalFormatter creates a Terminal formatter for colored output -// format is the time format to use for 
timestamp formatting -func NewTerminalFormatter(format string) Formatter { - supportsColor := (runtime.GOOS != "windows") - return &TerminalFormatter{ - format: format, - supportsColor: supportsColor, - } -} - -func NewUIFormatter(format string) Formatter { - supportsColor := (runtime.GOOS != "windows") - return &UIFormatter{ - format: format, - supportsColor: supportsColor, - } -} - -// Timestamp uses formatting that is tview-specific for UI -func (f *UIFormatter) Timestamp(l Level, d time.Time) string { - t := "" - dateStr := "[" + d.Format(f.format) + "] " - switch l { - case InfoLevel: - t = "[#00ffff]INFO[white]" - case ErrorLevel: - t = "[red]ERROR[white]" - case DebugLevel: - t = "[yellow]DEBUG[white]" - case FatalLevel: - t = "[red]FATAL[white]" - } - return t + dateStr -} - -func (f *UIFormatter) Content(l Level, c string) string { - return c -} - -// Timestamp returns the log level with a matching color to the log type -func (f *TerminalFormatter) Timestamp(l Level, d time.Time) string { - t := "" - dateStr := "[" + d.Format(f.format) + "] " - switch l { - case InfoLevel: - t = f.output("INFO", skittles.Cyan) - case ErrorLevel: - t = f.output("ERROR", skittles.Red) - case DebugLevel: - t = f.output("DEBUG", skittles.Yellow) - case FatalLevel: - t = f.output("FATAL", skittles.Red) - } - return t + dateStr -} - -// Content just writes the log line straight to the sources -func (f *TerminalFormatter) Content(l Level, c string) string { - return c -} - -func (f *TerminalFormatter) output(msg string, colorFunc func(interface{}) string) string { - if f.supportsColor { - return colorFunc(msg) - } - return msg -} diff --git a/logger/manager.go b/logger/manager.go deleted file mode 100644 index 6a3df7b6..00000000 --- a/logger/manager.go +++ /dev/null @@ -1,59 +0,0 @@ -package logger - -import "sync" - -// SharedWriteManager is a package level variable to allows multiple loggers to use the same write manager. 
-// This is useful when multiple loggers will write to the same file to ensure they don't clobber each other. -var SharedWriteManager = NewWriteManager() - -type writeData struct { - target LogOutput - data []byte -} - -// WriteManager is a logging service that handles managing multiple writing streams -type WriteManager struct { - shutdown chan struct{} - writeChan chan writeData - writers map[string]Service - wg sync.WaitGroup -} - -// NewWriteManager creates a write manager that implements OutputManager -func NewWriteManager() OutputManager { - m := &WriteManager{ - shutdown: make(chan struct{}), - writeChan: make(chan writeData, 1000), - } - - go m.run() - return m -} - -// Append adds a message to the writer runloop -func (m *WriteManager) Append(data []byte, target LogOutput) { - m.wg.Add(1) - m.writeChan <- writeData{data: data, target: target} -} - -// Shutdown stops the sync manager service -func (m *WriteManager) Shutdown() { - m.wg.Wait() - close(m.shutdown) - close(m.writeChan) -} - -// run is the main runloop that schedules log messages -func (m *WriteManager) run() { - for { - select { - case event, ok := <-m.writeChan: - if ok { - event.target.WriteLogLine(event.data) - m.wg.Done() - } - case <-m.shutdown: - return - } - } -} diff --git a/logger/manager_test.go b/logger/manager_test.go deleted file mode 100644 index f1303d1b..00000000 --- a/logger/manager_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package logger - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -type outputFunc func(b []byte) - -func (f outputFunc) WriteLogLine(data []byte) { - f(data) -} - -func TestWriteManger(t *testing.T) { - testData := []byte(string("hello Austin, how are you doing?")) - waitChan := make(chan []byte) - m := NewWriteManager() - m.Append(testData, outputFunc(func(b []byte) { - waitChan <- b - })) - resp := <-waitChan - assert.Equal(t, testData, resp) -} diff --git a/logger/mock_manager.go b/logger/mock_manager.go deleted file mode 100644 index 
9efddd61..00000000 --- a/logger/mock_manager.go +++ /dev/null @@ -1,18 +0,0 @@ -package logger - -// MockWriteManager does nothing and is provided for testing purposes -type MockWriteManager struct { -} - -// NewMockWriteManager creates an OutputManager that does nothing for testing purposes -func NewMockWriteManager() OutputManager { - return &MockWriteManager{} -} - -// Append is a mock stub -func (m *MockWriteManager) Append(data []byte, target LogOutput) { -} - -// Shutdown is a mock stub -func (m *MockWriteManager) Shutdown() { -} diff --git a/logger/output.go b/logger/output.go deleted file mode 100644 index fbddaf79..00000000 --- a/logger/output.go +++ /dev/null @@ -1,157 +0,0 @@ -package logger - -import ( - "fmt" - "io" - "os" - "time" -) - -// provided for testing -var osExit = os.Exit - -type LogOutput interface { - WriteLogLine([]byte) -} - -// OutputManager is used to sync data of Output -type OutputManager interface { - Append([]byte, LogOutput) - Shutdown() -} - -// Service is the logging service that is either a group or single log writer -type Service interface { - Error(message string) - Info(message string) - Debug(message string) - Fatal(message string) - - Errorf(format string, args ...interface{}) - Infof(format string, args ...interface{}) - Debugf(format string, args ...interface{}) - Fatalf(format string, args ...interface{}) - - Add(writer io.Writer, formatter Formatter, levels ...Level) -} - -type sourceGroup struct { - writer io.Writer - formatter Formatter - levelsSupported []Level -} - -func (s *sourceGroup) WriteLogLine(data []byte) { - _, _ = s.writer.Write(data) -} - -func (s *sourceGroup) supportsLevel(l Level) bool { - for _, level := range s.levelsSupported { - if l == level { - return true - } - } - return false -} - -// OutputWriter is the standard logging implementation -type OutputWriter struct { - groups []*sourceGroup - syncWriter OutputManager - minLevel Level -} - -// NewOutputWriter creates a new logger -func 
NewOutputWriter(syncWriter OutputManager) *OutputWriter { - return &OutputWriter{ - syncWriter: syncWriter, - groups: nil, - minLevel: FatalLevel, - } -} - -// Add a writer and formatter to output to -func (s *OutputWriter) Add(writer io.Writer, formatter Formatter, levels ...Level) { - s.groups = append(s.groups, &sourceGroup{writer: writer, formatter: formatter, levelsSupported: levels}) - - // track most verbose (lowest) level we need to output - for _, level := range levels { - if level < s.minLevel { - s.minLevel = level - } - } -} - -// Error writes an error to the logging sources -func (s *OutputWriter) Error(message string) { - if s.minLevel <= ErrorLevel { - s.output(ErrorLevel, message) - } -} - -// Info writes an info string to the logging sources -func (s *OutputWriter) Info(message string) { - if s.minLevel <= InfoLevel { - s.output(InfoLevel, message) - } -} - -// Debug writes a debug string to the logging sources -func (s *OutputWriter) Debug(message string) { - if s.minLevel <= DebugLevel { - s.output(DebugLevel, message) - } -} - -// Fatal writes a error string to the logging sources and runs does an os.exit() -func (s *OutputWriter) Fatal(message string) { - s.output(FatalLevel, message) - s.syncWriter.Shutdown() // waits for the pending logging to finish - osExit(1) -} - -// Errorf writes a formatted error to the logging sources -func (s *OutputWriter) Errorf(format string, args ...interface{}) { - if s.minLevel <= ErrorLevel { - s.output(ErrorLevel, fmt.Sprintf(format, args...)) - } -} - -// Infof writes a formatted info statement to the logging sources -func (s *OutputWriter) Infof(format string, args ...interface{}) { - if s.minLevel <= InfoLevel { - s.output(InfoLevel, fmt.Sprintf(format, args...)) - } -} - -// Debugf writes a formatted debug statement to the logging sources -func (s *OutputWriter) Debugf(format string, args ...interface{}) { - if s.minLevel <= DebugLevel { - s.output(DebugLevel, fmt.Sprintf(format, args...)) - } -} - -// 
Fatalf writes a writes a formatted error statement and runs does an os.exit() -func (s *OutputWriter) Fatalf(format string, args ...interface{}) { - s.output(FatalLevel, fmt.Sprintf(format, args...)) - s.syncWriter.Shutdown() // waits for the pending logging to finish - osExit(1) -} - -// output does the actual write to the sync manager -func (s *OutputWriter) output(l Level, content string) { - now := time.Now() - for _, group := range s.groups { - if group.supportsLevel(l) { - logLine := fmt.Sprintf("%s%s\n", group.formatter.Timestamp(l, now), - group.formatter.Content(l, content)) - s.syncWriter.Append([]byte(logLine), group) - } - } -} - -// Write implements io.Writer to support SetOutput of the log package -func (s *OutputWriter) Write(p []byte) (n int, err error) { - s.Info(string(p)) - return len(p), nil -} diff --git a/logger/output_test.go b/logger/output_test.go deleted file mode 100644 index b42387ad..00000000 --- a/logger/output_test.go +++ /dev/null @@ -1,106 +0,0 @@ -package logger - -import ( - "bufio" - "bytes" - "fmt" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestLogLevel(t *testing.T) { - timeFormat := "2006-01-02" - f := NewDefaultFormatter(timeFormat) - m := NewWriteManager() - - var testBuffer bytes.Buffer - logger := NewOutputWriter(m) - logger.Add(&testBuffer, f, InfoLevel, DebugLevel) - - testTime := f.Timestamp(InfoLevel, time.Now()) - - testInfo := "hello Dalton, how are you doing?" - logger.Info(testInfo) - - tesErr := "hello Austin, how did it break today?" - logger.Error(tesErr) - - testDebug := "hello Bill, who are you?" 
- logger.Debug(testDebug) - - m.Shutdown() - - lines := strings.Split(testBuffer.String(), "\n") - assert.Len(t, lines, 3, "only expected two strings in the buffer") - - infoLine := lines[0] - debugLine := lines[1] - - compareInfo := fmt.Sprintf("%s%s", testTime, testInfo) - assert.Equal(t, compareInfo, infoLine, "expect the strings to match") - - compareDebug := fmt.Sprintf("%s%s", testTime, testDebug) - assert.Equal(t, compareDebug, debugLine, "expect the strings to match") -} - -func TestOutputWrite(t *testing.T) { - timeFormat := "2006-01-02" - f := NewDefaultFormatter(timeFormat) - m := NewWriteManager() - - var testBuffer bytes.Buffer - logger := NewOutputWriter(m) - logger.Add(&testBuffer, f, InfoLevel) - - logger.Debugf("debug message not logged here") - - testData := "hello Bob Bork, how are you doing?" - logger.Info(testData) - testTime := f.Timestamp(InfoLevel, time.Now()) - - m.Shutdown() - - scanner := bufio.NewScanner(&testBuffer) - scanner.Scan() - line := scanner.Text() - assert.NoError(t, scanner.Err()) - - compareLine := fmt.Sprintf("%s%s", testTime, testData) - assert.Equal(t, compareLine, line, "expect the strings to match") -} - -func TestFatalWrite(t *testing.T) { - timeFormat := "2006-01-02" - f := NewDefaultFormatter(timeFormat) - m := NewWriteManager() - - var testBuffer bytes.Buffer - logger := NewOutputWriter(m) - logger.Add(&testBuffer, f, FatalLevel) - - oldOsExit := osExit - defer func() { osExit = oldOsExit }() - - var got int - myExit := func(code int) { - got = code - } - - osExit = myExit - - testData := "so long y'all" - logger.Fatal(testData) - testTime := f.Timestamp(FatalLevel, time.Now()) - - scanner := bufio.NewScanner(&testBuffer) - scanner.Scan() - line := scanner.Text() - assert.NoError(t, scanner.Err()) - - compareLine := fmt.Sprintf("%s%s", testTime, testData) - assert.Equal(t, compareLine, line, "expect the strings to match") - assert.Equal(t, got, 1, "exit code should be one for a fatal log") -} diff --git 
a/metrics/metrics.go b/metrics/metrics.go index 9784c66a..b4d9e99e 100644 --- a/metrics/metrics.go +++ b/metrics/metrics.go @@ -10,12 +10,12 @@ import ( "sync" "time" - "golang.org/x/net/trace" - "github.com/cloudflare/cloudflared/connection" - "github.com/cloudflare/cloudflared/logger" + "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + "github.com/rs/zerolog" + "golang.org/x/net/trace" ) const ( @@ -23,12 +23,12 @@ const ( startupTime = time.Millisecond * 500 ) -func newMetricsHandler(connectionEvents <-chan connection.Event, log logger.Service) *http.ServeMux { +func newMetricsHandler(connectionEvents <-chan connection.Event, log *zerolog.Logger) *http.ServeMux { readyServer := NewReadyServer(connectionEvents, log) mux := http.NewServeMux() mux.Handle("/metrics", promhttp.Handler()) mux.HandleFunc("/healthcheck", func(w http.ResponseWriter, r *http.Request) { - fmt.Fprintf(w, "OK\n") + _, _ = fmt.Fprintf(w, "OK\n") }) mux.Handle("/ready", readyServer) return mux @@ -38,14 +38,14 @@ func ServeMetrics( l net.Listener, shutdownC <-chan struct{}, connectionEvents <-chan connection.Event, - logger logger.Service, + log *zerolog.Logger, ) (err error) { var wg sync.WaitGroup // Metrics port is privileged, so no need for further access control trace.AuthRequest = func(*http.Request) (bool, bool) { return true, true } // TODO: parameterize ReadTimeout and WriteTimeout. 
The maximum time we can // profile CPU usage depends on WriteTimeout - h := newMetricsHandler(connectionEvents, logger) + h := newMetricsHandler(connectionEvents, log) server := &http.Server{ ReadTimeout: 10 * time.Second, WriteTimeout: 10 * time.Second, @@ -57,22 +57,22 @@ func ServeMetrics( defer wg.Done() err = server.Serve(l) }() - logger.Infof("Starting metrics server on %s", fmt.Sprintf("%v/metrics", l.Addr())) + log.Info().Msgf("Starting metrics server on %s", fmt.Sprintf("%v/metrics", l.Addr())) // server.Serve will hang if server.Shutdown is called before the server is // fully started up. So add artificial delay. time.Sleep(startupTime) <-shutdownC ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout) - server.Shutdown(ctx) + _ = server.Shutdown(ctx) cancel() wg.Wait() if err == http.ErrServerClosed { - logger.Info("Metrics server stopped") + log.Info().Msg("Metrics server stopped") return nil } - logger.Errorf("Metrics server quit with error: %s", err) + log.Error().Msgf("Metrics server quit with error: %s", err) return err } diff --git a/metrics/readiness.go b/metrics/readiness.go index 856e3e1d..52826772 100644 --- a/metrics/readiness.go +++ b/metrics/readiness.go @@ -7,18 +7,19 @@ import ( "sync" conn "github.com/cloudflare/cloudflared/connection" - "github.com/cloudflare/cloudflared/logger" + + "github.com/rs/zerolog" ) // ReadyServer serves HTTP 200 if the tunnel can serve traffic. Intended for k8s readiness checks. type ReadyServer struct { sync.RWMutex isConnected map[int]bool - log logger.Service + log *zerolog.Logger } // NewReadyServer initializes a ReadyServer and starts listening for dis/connection events. 
-func NewReadyServer(connectionEvents <-chan conn.Event, log logger.Service) *ReadyServer { +func NewReadyServer(connectionEvents <-chan conn.Event, log *zerolog.Logger) *ReadyServer { rs := ReadyServer{ isConnected: make(map[int]bool, 0), log: log, @@ -37,7 +38,7 @@ func NewReadyServer(connectionEvents <-chan conn.Event, log logger.Service) *Rea case conn.SetURL: continue default: - rs.log.Errorf("Unknown connection event case %v", c) + rs.log.Error().Msgf("Unknown connection event case %v", c) } } }() @@ -59,9 +60,9 @@ func (rs *ReadyServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { } msg, err := json.Marshal(body) if err != nil { - fmt.Fprintf(w, `{"error": "%s"}`, err) + _, _ = fmt.Fprintf(w, `{"error": "%s"}`, err) } - w.Write(msg) + _, _ = w.Write(msg) } // This is the bulk of the logic for ServeHTTP, broken into its own pure function diff --git a/origin/proxy.go b/origin/proxy.go index 8a0cbc2d..b77e1de1 100644 --- a/origin/proxy.go +++ b/origin/proxy.go @@ -13,10 +13,11 @@ import ( "github.com/cloudflare/cloudflared/buffer" "github.com/cloudflare/cloudflared/connection" "github.com/cloudflare/cloudflared/ingress" - "github.com/cloudflare/cloudflared/logger" tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" "github.com/cloudflare/cloudflared/websocket" + "github.com/pkg/errors" + "github.com/rs/zerolog" ) const ( @@ -26,15 +27,15 @@ const ( type client struct { ingressRules ingress.Ingress tags []tunnelpogs.Tag - logger logger.Service + log *zerolog.Logger bufferPool *buffer.Pool } -func NewClient(ingressRules ingress.Ingress, tags []tunnelpogs.Tag, logger logger.Service) connection.OriginClient { +func NewClient(ingressRules ingress.Ingress, tags []tunnelpogs.Tag, log *zerolog.Logger) connection.OriginClient { return &client{ ingressRules: ingressRules, tags: tags, - logger: logger, + log: log, bufferPool: buffer.NewPool(512 * 1024), } } @@ -97,14 +98,14 @@ func (c *client) proxyHTTP(w connection.ResponseWriter, req *http.Request, rule 
return nil, errors.Wrap(err, "Error writing response header") } if connection.IsServerSentEvent(resp.Header) { - c.logger.Debug("Detected Server-Side Events from Origin") + c.log.Debug().Msg("Detected Server-Side Events from Origin") c.writeEventStream(w, resp.Body) } else { // Use CopyBuffer, because Copy only allocates a 32KiB buffer, and cross-stream // compression generates dictionary on first write buf := c.bufferPool.Get() defer c.bufferPool.Put(buf) - io.CopyBuffer(w, resp.Body, buf) + _, _ = io.CopyBuffer(w, resp.Body, buf) } return resp, nil } @@ -129,7 +130,7 @@ func (c *client) proxyWebsocket(w connection.ResponseWriter, req *http.Request, go func() { // serveCtx is done if req is cancelled, or streamWebsocket returns <-serveCtx.Done() - conn.Close() + _ = conn.Close() close(connClosedChan) }() @@ -159,7 +160,7 @@ func (c *client) writeEventStream(w connection.ResponseWriter, respBody io.ReadC if err != nil { break } - w.Write(line) + _, _ = w.Write(line) } } @@ -171,46 +172,46 @@ func (c *client) appendTagHeaders(r *http.Request) { func (c *client) logRequest(r *http.Request, cfRay string, lbProbe bool, ruleNum int) { if cfRay != "" { - c.logger.Debugf("CF-RAY: %s %s %s %s", cfRay, r.Method, r.URL, r.Proto) + c.log.Debug().Msgf("CF-RAY: %s %s %s %s", cfRay, r.Method, r.URL, r.Proto) } else if lbProbe { - c.logger.Debugf("CF-RAY: %s Load Balancer health check %s %s %s", cfRay, r.Method, r.URL, r.Proto) + c.log.Debug().Msgf("CF-RAY: %s Load Balancer health check %s %s %s", cfRay, r.Method, r.URL, r.Proto) } else { - c.logger.Debugf("All requests should have a CF-RAY header. Please open a support ticket with Cloudflare. %s %s %s ", r.Method, r.URL, r.Proto) + c.log.Debug().Msgf("All requests should have a CF-RAY header. Please open a support ticket with Cloudflare. 
%s %s %s ", r.Method, r.URL, r.Proto) } - c.logger.Debugf("CF-RAY: %s Request Headers %+v", cfRay, r.Header) - c.logger.Debugf("CF-RAY: %s Serving with ingress rule %d", cfRay, ruleNum) + c.log.Debug().Msgf("CF-RAY: %s Request Headers %+v", cfRay, r.Header) + c.log.Debug().Msgf("CF-RAY: %s Serving with ingress rule %d", cfRay, ruleNum) if contentLen := r.ContentLength; contentLen == -1 { - c.logger.Debugf("CF-RAY: %s Request Content length unknown", cfRay) + c.log.Debug().Msgf("CF-RAY: %s Request Content length unknown", cfRay) } else { - c.logger.Debugf("CF-RAY: %s Request content length %d", cfRay, contentLen) + c.log.Debug().Msgf("CF-RAY: %s Request content length %d", cfRay, contentLen) } } func (c *client) logOriginResponse(r *http.Response, cfRay string, lbProbe bool, ruleNum int) { responseByCode.WithLabelValues(strconv.Itoa(r.StatusCode)).Inc() if cfRay != "" { - c.logger.Debugf("CF-RAY: %s Status: %s served by ingress %d", cfRay, r.Status, ruleNum) + c.log.Info().Msgf("CF-RAY: %s Status: %s served by ingress %d", cfRay, r.Status, ruleNum) } else if lbProbe { - c.logger.Debugf("Response to Load Balancer health check %s", r.Status) + c.log.Debug().Msgf("Response to Load Balancer health check %s", r.Status) } else { - c.logger.Debugf("Status: %s served by ingress %d", r.Status, ruleNum) + c.log.Debug().Msgf("Status: %s served by ingress %d", r.Status, ruleNum) } - c.logger.Debugf("CF-RAY: %s Response Headers %+v", cfRay, r.Header) + c.log.Debug().Msgf("CF-RAY: %s Response Headers %+v", cfRay, r.Header) if contentLen := r.ContentLength; contentLen == -1 { - c.logger.Debugf("CF-RAY: %s Response content length unknown", cfRay) + c.log.Debug().Msgf("CF-RAY: %s Response content length unknown", cfRay) } else { - c.logger.Debugf("CF-RAY: %s Response content length %d", cfRay, contentLen) + c.log.Debug().Msgf("CF-RAY: %s Response content length %d", cfRay, contentLen) } } func (c *client) logRequestError(err error, cfRay string, ruleNum int) { requestErrors.Inc() if 
cfRay != "" { - c.logger.Errorf("CF-RAY: %s Proxying to ingress %d error: %v", cfRay, ruleNum, err) + c.log.Error().Msgf("CF-RAY: %s Proxying to ingress %d error: %v", cfRay, ruleNum, err) } else { - c.logger.Errorf("Proxying to ingress %d error: %v", ruleNum, err) + c.log.Error().Msgf("Proxying to ingress %d error: %v", ruleNum, err) } } diff --git a/origin/proxy_test.go b/origin/proxy_test.go index d20be70a..6e07a559 100644 --- a/origin/proxy_test.go +++ b/origin/proxy_test.go @@ -16,11 +16,11 @@ import ( "github.com/cloudflare/cloudflared/connection" "github.com/cloudflare/cloudflared/hello" "github.com/cloudflare/cloudflared/ingress" - "github.com/cloudflare/cloudflared/logger" tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" "github.com/urfave/cli/v2" "github.com/gobwas/ws/wsutil" + "github.com/rs/zerolog" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -49,7 +49,7 @@ func (w *mockHTTPRespWriter) WriteRespHeaders(resp *http.Response) error { func (w *mockHTTPRespWriter) WriteErrorResponse() { w.WriteHeader(http.StatusBadGateway) - w.Write([]byte("http response error")) + _, _ = w.Write([]byte("http response error")) } func (w *mockHTTPRespWriter) Read(data []byte) (int, error) { @@ -106,8 +106,7 @@ func (w *mockSSERespWriter) ReadBytes() []byte { } func TestProxySingleOrigin(t *testing.T) { - logger, err := logger.New() - require.NoError(t, err) + log := zerolog.Nop() ctx, cancel := context.WithCancel(context.Background()) @@ -115,18 +114,18 @@ func TestProxySingleOrigin(t *testing.T) { flagSet.Bool("hello-world", true, "") cliCtx := cli.NewContext(cli.NewApp(), flagSet, nil) - err = cliCtx.Set("hello-world", "true") + err := cliCtx.Set("hello-world", "true") require.NoError(t, err) allowURLFromArgs := false - ingressRule, err := ingress.NewSingleOrigin(cliCtx, allowURLFromArgs, logger) + ingressRule, err := ingress.NewSingleOrigin(cliCtx, allowURLFromArgs) require.NoError(t, err) var wg sync.WaitGroup errC := 
make(chan error) - ingressRule.StartOrigins(&wg, logger, ctx.Done(), errC) + ingressRule.StartOrigins(&wg, &log, ctx.Done(), errC) - client := NewClient(ingressRule, testTags, logger) + client := NewClient(ingressRule, testTags, &log) t.Run("testProxyHTTP", testProxyHTTP(t, client)) t.Run("testProxyWebsocket", testProxyWebsocket(t, client)) t.Run("testProxySSE", testProxySSE(t, client)) @@ -191,7 +190,7 @@ func testProxySSE(t *testing.T, client connection.OriginClient) func(t *testing. return func(t *testing.T) { var ( pushCount = 50 - pushFreq = time.Duration(time.Millisecond * 10) + pushFreq = time.Millisecond * 10 ) respWriter := newMockSSERespWriter() ctx, cancel := context.WithCancel(context.Background()) @@ -252,15 +251,14 @@ func TestProxyMultipleOrigins(t *testing.T) { }) require.NoError(t, err) - logger, err := logger.New() - require.NoError(t, err) + log := zerolog.Nop() ctx, cancel := context.WithCancel(context.Background()) errC := make(chan error) var wg sync.WaitGroup - ingress.StartOrigins(&wg, logger, ctx.Done(), errC) + ingress.StartOrigins(&wg, &log, ctx.Done(), errC) - client := NewClient(ingress, testTags, logger) + client := NewClient(ingress, testTags, &log) tests := []struct { url string @@ -314,7 +312,7 @@ type mockAPI struct{} func (ma mockAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusCreated) - w.Write([]byte("Created")) + _, _ = w.Write([]byte("Created")) } type errorOriginTransport struct{} @@ -336,10 +334,9 @@ func TestProxyError(t *testing.T) { }, } - logger, err := logger.New() - require.NoError(t, err) + log := zerolog.Nop() - client := NewClient(ingress, testTags, logger) + client := NewClient(ingress, testTags, &log) respWriter := newMockHTTPRespWriter() req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1", nil) diff --git a/origin/supervisor.go b/origin/supervisor.go index 7759e103..574dd36a 100644 --- a/origin/supervisor.go +++ b/origin/supervisor.go @@ -6,14 +6,14 @@ import ( "net" 
"time" - "github.com/google/uuid" - "github.com/cloudflare/cloudflared/connection" "github.com/cloudflare/cloudflared/edgediscovery" "github.com/cloudflare/cloudflared/h2mux" - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/signal" tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" + + "github.com/google/uuid" + "github.com/rs/zerolog" ) const ( @@ -50,7 +50,7 @@ type Supervisor struct { nextConnectedIndex int nextConnectedSignal chan struct{} - logger logger.Service + log *zerolog.Logger reconnectCredentialManager *reconnectCredentialManager useReconnectToken bool @@ -68,9 +68,9 @@ func NewSupervisor(config *TunnelConfig, cloudflaredUUID uuid.UUID) (*Supervisor err error ) if len(config.EdgeAddrs) > 0 { - edgeIPs, err = edgediscovery.StaticEdge(config.Logger, config.EdgeAddrs) + edgeIPs, err = edgediscovery.StaticEdge(config.Log, config.EdgeAddrs) } else { - edgeIPs, err = edgediscovery.ResolveEdge(config.Logger) + edgeIPs, err = edgediscovery.ResolveEdge(config.Log) } if err != nil { return nil, err @@ -87,7 +87,7 @@ func NewSupervisor(config *TunnelConfig, cloudflaredUUID uuid.UUID) (*Supervisor edgeIPs: edgeIPs, tunnelErrors: make(chan tunnelError), tunnelsConnecting: map[int]chan struct{}{}, - logger: config.Logger, + log: config.Log, reconnectCredentialManager: newReconnectCredentialManager(connection.MetricsNamespace, connection.TunnelSubsystem, config.HAConnections), useReconnectToken: useReconnectToken, }, nil @@ -110,7 +110,7 @@ func (s *Supervisor) Run(ctx context.Context, connectedSignal *signal.Signal, re if timer, err := s.reconnectCredentialManager.RefreshAuth(ctx, refreshAuthBackoff, s.authenticate); err == nil { refreshAuthBackoffTimer = timer } else { - s.logger.Errorf("supervisor: initial refreshAuth failed, retrying in %v: %s", refreshAuthRetryDuration, err) + s.log.Error().Msgf("supervisor: initial refreshAuth failed, retrying in %v: %s", refreshAuthRetryDuration, err) refreshAuthBackoffTimer = 
time.After(refreshAuthRetryDuration) } } @@ -129,7 +129,7 @@ func (s *Supervisor) Run(ctx context.Context, connectedSignal *signal.Signal, re case tunnelError := <-s.tunnelErrors: tunnelsActive-- if tunnelError.err != nil { - s.logger.Infof("supervisor: Tunnel disconnected due to error: %s", tunnelError.err) + s.log.Info().Msgf("supervisor: Tunnel disconnected due to error: %s", tunnelError.err) tunnelsWaiting = append(tunnelsWaiting, tunnelError.index) s.waitForNextTunnel(tunnelError.index) @@ -152,7 +152,7 @@ func (s *Supervisor) Run(ctx context.Context, connectedSignal *signal.Signal, re case <-refreshAuthBackoffTimer: newTimer, err := s.reconnectCredentialManager.RefreshAuth(ctx, refreshAuthBackoff, s.authenticate) if err != nil { - s.logger.Errorf("supervisor: Authentication failed: %s", err) + s.log.Error().Msgf("supervisor: Authentication failed: %s", err) // Permanent failure. Leave the `select` without setting the // channel to be non-null, so we'll never hit this case of the `select` again. continue @@ -172,7 +172,7 @@ func (s *Supervisor) Run(ctx context.Context, connectedSignal *signal.Signal, re func (s *Supervisor) initialize(ctx context.Context, connectedSignal *signal.Signal, reconnectCh chan ReconnectSignal) error { availableAddrs := int(s.edgeIPs.AvailableAddrs()) if s.config.HAConnections > availableAddrs { - s.logger.Infof("You requested %d HA connections but I can give you at most %d.", s.config.HAConnections, availableAddrs) + s.log.Info().Msgf("You requested %d HA connections but I can give you at most %d.", s.config.HAConnections, availableAddrs) s.config.HAConnections = availableAddrs } @@ -295,7 +295,7 @@ func (s *Supervisor) authenticate(ctx context.Context, numPreviousAttempts int) // This callback is invoked by h2mux when the edge initiates a stream. 
return nil // noop }) - muxerConfig := s.config.MuxerConfig.H2MuxerConfig(handler, s.logger) + muxerConfig := s.config.MuxerConfig.H2MuxerConfig(handler, s.log) muxer, err := h2mux.Handshake(edgeConn, edgeConn, *muxerConfig, h2mux.ActiveStreams) if err != nil { return nil, err @@ -311,7 +311,7 @@ func (s *Supervisor) authenticate(ctx context.Context, numPreviousAttempts int) if err != nil { return nil, err } - rpcClient := connection.NewTunnelServerClient(ctx, stream, s.logger) + rpcClient := connection.NewTunnelServerClient(ctx, stream, s.log) defer rpcClient.Close() const arbitraryConnectionID = uint8(0) diff --git a/origin/tunnel.go b/origin/tunnel.go index f175d978..9405eccf 100644 --- a/origin/tunnel.go +++ b/origin/tunnel.go @@ -13,13 +13,13 @@ import ( "github.com/google/uuid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "github.com/rs/zerolog" "golang.org/x/sync/errgroup" "github.com/cloudflare/cloudflared/cmd/cloudflared/buildinfo" "github.com/cloudflare/cloudflared/connection" "github.com/cloudflare/cloudflared/edgediscovery" "github.com/cloudflare/cloudflared/h2mux" - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/signal" "github.com/cloudflare/cloudflared/tunnelrpc" tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" @@ -55,7 +55,7 @@ type TunnelConfig struct { IsFreeTunnel bool LBPool string Tags []tunnelpogs.Tag - Logger logger.Service + Log *zerolog.Logger Observer *connection.Observer ReportedVersion string Retries uint @@ -235,7 +235,7 @@ func waitForBackoff( } config.Observer.SendReconnect(connIndex) - config.Logger.Infof("Retrying connection %d in %s seconds, error %v", connIndex, duration, err) + config.Log.Info().Msgf("Retrying connection %d in %s seconds, error %v", connIndex, duration, err) protobackoff.Backoff(ctx) if protobackoff.ReachedMaxRetries() { @@ -247,13 +247,13 @@ func waitForBackoff( if protobackoff.protocol == fallback { return err } - 
config.Logger.Infof("Fallback to use %s", fallback) + config.Log.Info().Msgf("Fallback to use %s", fallback) protobackoff.fallback(fallback) } else if !protobackoff.inFallback { current := config.ProtocolSelector.Current() if protobackoff.protocol != current { protobackoff.protocol = current - config.Logger.Infof("Change protocol to %s", current) + config.Log.Info().Msgf("Change protocol to %s", current) } } return nil @@ -311,9 +311,16 @@ func ServeH2mux( cloudflaredUUID uuid.UUID, reconnectCh chan ReconnectSignal, ) (err error, recoverable bool) { - config.Logger.Debugf("Connecting via h2mux") + config.Log.Debug().Msgf("Connecting via h2mux") // Returns error from parsing the origin URL or handshake errors - handler, err, recoverable := connection.NewH2muxConnection(ctx, config.ConnectionConfig, config.MuxerConfig, edgeConn, connectionIndex, config.Observer) + handler, err, recoverable := connection.NewH2muxConnection( + ctx, + config.ConnectionConfig, + config.MuxerConfig, + edgeConn, + connectionIndex, + config.Observer, + ) if err != nil { return err, recoverable } @@ -338,29 +345,29 @@ func ServeH2mux( // don't retry this connection anymore, let supervisor pick new a address return err, false case *serverRegisterTunnelError: - config.Logger.Errorf("Register tunnel error from server side: %s", err.cause) + config.Log.Error().Msgf("Register tunnel error from server side: %s", err.cause) // Don't send registration error return from server to Sentry. 
They are // logged on server side if incidents := config.IncidentLookup.ActiveIncidents(); len(incidents) > 0 { - config.Logger.Error(activeIncidentsMsg(incidents)) + config.Log.Error().Msg(activeIncidentsMsg(incidents)) } return err.cause, !err.permanent case *clientRegisterTunnelError: - config.Logger.Errorf("Register tunnel error on client side: %s", err.cause) + config.Log.Error().Msgf("Register tunnel error on client side: %s", err.cause) return err, true case *muxerShutdownError: - config.Logger.Info("Muxer shutdown") + config.Log.Info().Msg("Muxer shutdown") return err, true case *ReconnectSignal: - config.Logger.Infof("Restarting connection %d due to reconnect signal in %s", connectionIndex, err.Delay) + config.Log.Info().Msgf("Restarting connection %d due to reconnect signal in %s", connectionIndex, err.Delay) err.DelayBeforeReconnect() return err, true default: if err == context.Canceled { - config.Logger.Debugf("Serve tunnel error: %s", err) + config.Log.Debug().Msgf("Serve tunnel error: %s", err) return err, false } - config.Logger.Errorf("Serve tunnel error: %s", err) + config.Log.Error().Msgf("Serve tunnel error: %s", err) return err, true } } @@ -376,8 +383,16 @@ func ServeHTTP2( connectedFuse connection.ConnectedFuse, reconnectCh chan ReconnectSignal, ) (err error, recoverable bool) { - config.Logger.Debugf("Connecting via http2") - server := connection.NewHTTP2Connection(tlsServerConn, config.ConnectionConfig, config.NamedTunnel, connOptions, config.Observer, connIndex, connectedFuse) + config.Log.Debug().Msgf("Connecting via http2") + server := connection.NewHTTP2Connection( + tlsServerConn, + config.ConnectionConfig, + config.NamedTunnel, + connOptions, + config.Observer, + connIndex, + connectedFuse, + ) errGroup, serveCtx := errgroup.WithContext(ctx) errGroup.Go(func() error { diff --git a/origin/tunnel_test.go b/origin/tunnel_test.go index 390ef1c6..f3e91982 100644 --- a/origin/tunnel_test.go +++ b/origin/tunnel_test.go @@ -7,7 +7,8 @@ import 
( "time" "github.com/cloudflare/cloudflared/connection" - "github.com/cloudflare/cloudflared/logger" + + "github.com/rs/zerolog" "github.com/stretchr/testify/assert" ) @@ -31,8 +32,7 @@ func TestWaitForBackoffFallback(t *testing.T) { BaseTime: time.Millisecond * 10, } ctx := context.Background() - logger, err := logger.New() - assert.NoError(t, err) + log := zerolog.Nop() resolveTTL := time.Duration(0) namedTunnel := &connection.NamedTunnelConfig{ Credentials: connection.Credentials{ @@ -42,10 +42,16 @@ func TestWaitForBackoffFallback(t *testing.T) { mockFetcher := dynamicMockFetcher{ percentage: 0, } - protocolSelector, err := connection.NewProtocolSelector(connection.HTTP2.String(), namedTunnel, mockFetcher.fetch(), resolveTTL, logger) + protocolSelector, err := connection.NewProtocolSelector( + connection.HTTP2.String(), + namedTunnel, + mockFetcher.fetch(), + resolveTTL, + &log, + ) assert.NoError(t, err) config := &TunnelConfig{ - Logger: logger, + Log: &log, ProtocolSelector: protocolSelector, Observer: connection.NewObserver(nil, nil, false), } diff --git a/sshlog/empty_manager.go b/sshlog/empty_manager.go deleted file mode 100644 index e95825e6..00000000 --- a/sshlog/empty_manager.go +++ /dev/null @@ -1,37 +0,0 @@ -package sshlog - -import ( - "io" - - "github.com/cloudflare/cloudflared/logger" -) - -//empty manager implements the Manager but does nothing (for testing and to disable logging unless the logs are set) -type emptyManager struct { -} - -type emptyWriteCloser struct { -} - -// NewEmptyManager creates a new instance of a log empty log manager that does nothing -func NewEmptyManager() Manager { - return &emptyManager{} -} - -func (m *emptyManager) NewLogger(name string, logger logger.Service) (io.WriteCloser, error) { - return &emptyWriteCloser{}, nil -} - -func (m *emptyManager) NewSessionLogger(name string, logger logger.Service) (io.WriteCloser, error) { - return &emptyWriteCloser{}, nil -} - -// emptyWriteCloser - -func (w *emptyWriteCloser) 
Write(p []byte) (n int, err error) { - return len(p), nil -} - -func (w *emptyWriteCloser) Close() error { - return nil -} diff --git a/sshlog/go.capnp b/sshlog/go.capnp deleted file mode 100644 index c12d70a4..00000000 --- a/sshlog/go.capnp +++ /dev/null @@ -1,15 +0,0 @@ -# Generate go.capnp.out with: -# capnp compile -o- go.capnp > go.capnp.out -# Must run inside this directory to preserve paths. - -@0xd12a1c51fedd6c88; - -annotation package(file) :Text; -annotation import(file) :Text; -annotation doc(struct, field, enum) :Text; -annotation tag(enumerant) :Text; -annotation notag(enumerant) :Void; -annotation customtype(field) :Text; -annotation name(struct, field, union, enum, enumerant, interface, method, param, annotation, const, group) :Text; - -$package("capnp"); diff --git a/sshlog/logger.go b/sshlog/logger.go deleted file mode 100644 index b62f87b3..00000000 --- a/sshlog/logger.go +++ /dev/null @@ -1,167 +0,0 @@ -package sshlog - -import ( - "bufio" - "errors" - "fmt" - "os" - "path/filepath" - "sync" - "time" - - "github.com/cloudflare/cloudflared/logger" -) - -const ( - logTimeFormat = "2006-01-02T15-04-05.000" - megabyte = 1024 * 1024 - defaultFileSizeLimit = 100 * megabyte -) - -// Logger will buffer and write events to disk -type Logger struct { - sync.Mutex - filename string - file *os.File - writeBuffer *bufio.Writer - logger logger.Service - flushInterval time.Duration - maxFileSize int64 - done chan struct{} - once sync.Once -} - -// NewLogger creates a Logger instance. A buffer is created that needs to be -// drained and closed when the caller is finished, so instances should call -// Close when finished with this Logger instance. Writes will be flushed to disk -// every second (fsync). filename is the name of the logfile to be created. The -// logger variable is a logger service that will log all i/o, filesystem error etc, that -// that shouldn't end execution of the logger, but are useful to report to the -// caller. 
-func NewLogger(filename string, logger logger.Service, flushInterval time.Duration, maxFileSize int64) (*Logger, error) { - if logger == nil { - return nil, errors.New("logger can't be nil") - } - f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) - if err != nil { - return nil, err - } - l := &Logger{filename: filename, - file: f, - writeBuffer: bufio.NewWriter(f), - logger: logger, - flushInterval: flushInterval, - maxFileSize: maxFileSize, - done: make(chan struct{}), - } - - go l.writer() - return l, nil -} - -// Writes to a log buffer. Implements the io.Writer interface. -func (l *Logger) Write(p []byte) (n int, err error) { - l.Lock() - defer l.Unlock() - return l.writeBuffer.Write(p) -} - -// Close drains anything left in the buffer and cleans up any resources still -// in use. -func (l *Logger) Close() error { - l.once.Do(func() { - close(l.done) - }) - if err := l.write(); err != nil { - return err - } - return l.file.Close() -} - -// writer is the run loop that handles draining the write buffer and syncing -// data to disk. -func (l *Logger) writer() { - ticker := time.NewTicker(l.flushInterval) - defer ticker.Stop() - for { - select { - case <-ticker.C: - if err := l.write(); err != nil { - l.logger.Errorf("%s", err) - } - case <-l.done: - return - } - } -} - -// write does the actual system write calls to disk and does a rotation if the -// file size limit has been reached. 
Since the rotation happens at the end, -// the rotation is a soft limit (aka the file can be bigger than the max limit -// because of the final buffer flush) -func (l *Logger) write() error { - l.Lock() - defer l.Unlock() - - if l.writeBuffer.Buffered() <= 0 { - return nil - } - - if err := l.writeBuffer.Flush(); err != nil { - return err - } - - if err := l.file.Sync(); err != nil { - return err - } - - if l.shouldRotate() { - return l.rotate() - } - return nil -} - -// shouldRotate checks to see if the current file should be rotated to a new -// logfile. -func (l *Logger) shouldRotate() bool { - info, err := l.file.Stat() - if err != nil { - return false - } - - return info.Size() >= l.maxFileSize -} - -// rotate creates a new logfile with the existing filename and renames the -// existing file with a current timestamp. -func (l *Logger) rotate() error { - if err := l.file.Close(); err != nil { - return err - } - - // move the existing file - newname := rotationName(l.filename) - if err := os.Rename(l.filename, newname); err != nil { - return fmt.Errorf("can't rename log file: %s", err) - } - - f, err := os.OpenFile(l.filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600)) - if err != nil { - return fmt.Errorf("failed to open new logfile %s", err) - } - l.file = f - l.writeBuffer = bufio.NewWriter(f) - return nil -} - -// rotationName creates a new filename from the given name, inserting a timestamp -// between the filename and the extension. 
-func rotationName(name string) string { - dir := filepath.Dir(name) - filename := filepath.Base(name) - ext := filepath.Ext(filename) - prefix := filename[:len(filename)-len(ext)] - t := time.Now() - timestamp := t.Format(logTimeFormat) - return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext)) -} diff --git a/sshlog/logger_test.go b/sshlog/logger_test.go deleted file mode 100644 index 061b9e33..00000000 --- a/sshlog/logger_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package sshlog - -import ( - "log" - "os" - "path/filepath" - "strings" - "testing" - "time" - - "github.com/cloudflare/cloudflared/logger" -) - -const logFileName = "test-logger.log" - -func createLogger(t *testing.T) *Logger { - os.Remove(logFileName) - l := logger.NewOutputWriter(logger.NewMockWriteManager()) - logger, err := NewLogger(logFileName, l, time.Millisecond, 1024) - if err != nil { - t.Fatal("couldn't create the logger!", err) - } - return logger -} - -// AUTH-2115 TODO: fix this test -//func TestWrite(t *testing.T) { -// testStr := "hi" -// logger := createLogger(t) -// defer func() { -// logger.Close() -// os.Remove(logFileName) -// }() -// -// logger.Write([]byte(testStr)) -// time.DelayBeforeReconnect(2 * time.Millisecond) -// data, err := ioutil.ReadFile(logFileName) -// if err != nil { -// t.Fatal("couldn't read the log file!", err) -// } -// checkStr := string(data) -// if checkStr != testStr { -// t.Fatal("file data doesn't match!") -// } -//} - -func TestFilenameRotation(t *testing.T) { - newName := rotationName("dir/bob/acoolloggername.log") - - dir := filepath.Dir(newName) - if dir != "dir/bob" { - t.Fatal("rotation name doesn't respect the directory filepath:", newName) - } - - filename := filepath.Base(newName) - if !strings.HasPrefix(filename, "acoolloggername") { - t.Fatal("rotation filename is wrong:", filename) - } - - ext := filepath.Ext(newName) - if ext != ".log" { - t.Fatal("rotation file extension is wrong:", ext) - } -} - -func TestRotation(t 
*testing.T) { - logger := createLogger(t) - - for i := 0; i < 2000; i++ { - logger.Write([]byte("a string for testing rotation\n")) - } - logger.Close() - - count := 0 - filepath.Walk(".", func(path string, info os.FileInfo, err error) error { - if err != nil || info.IsDir() { - return nil - } - if strings.HasPrefix(info.Name(), "test-logger") { - log.Println("deleting: ", path) - os.Remove(path) - count++ - } - return nil - }) - if count < 2 { - t.Fatal("rotation didn't roll files:", count) - } - -} diff --git a/sshlog/manager.go b/sshlog/manager.go deleted file mode 100644 index c3045287..00000000 --- a/sshlog/manager.go +++ /dev/null @@ -1,34 +0,0 @@ -package sshlog - -import ( - "io" - "path/filepath" - "time" - - "github.com/cloudflare/cloudflared/logger" -) - -// Manager be managing logs bruh -type Manager interface { - NewLogger(string, logger.Service) (io.WriteCloser, error) - NewSessionLogger(string, logger.Service) (io.WriteCloser, error) -} - -type manager struct { - baseDirectory string -} - -// New creates a new instance of a log manager -func New(baseDirectory string) Manager { - return &manager{ - baseDirectory: baseDirectory, - } -} - -func (m *manager) NewLogger(name string, logger logger.Service) (io.WriteCloser, error) { - return NewLogger(filepath.Join(m.baseDirectory, name), logger, time.Second, defaultFileSizeLimit) -} - -func (m *manager) NewSessionLogger(name string, logger logger.Service) (io.WriteCloser, error) { - return NewSessionLogger(filepath.Join(m.baseDirectory, name), logger, time.Second, defaultFileSizeLimit) -} diff --git a/sshlog/session_log.capnp b/sshlog/session_log.capnp deleted file mode 100644 index f3fff09a..00000000 --- a/sshlog/session_log.capnp +++ /dev/null @@ -1,9 +0,0 @@ -using Go = import "go.capnp"; -@0x8f43375162194466; -$Go.package("sshlog"); -$Go.import("github.com/cloudflare/cloudflared/sshlog"); - -struct SessionLog { - timestamp @0 :Text; - content @1 :Data; -} \ No newline at end of file diff --git 
a/sshlog/session_log.capnp.go b/sshlog/session_log.capnp.go deleted file mode 100644 index 7b61615c..00000000 --- a/sshlog/session_log.capnp.go +++ /dev/null @@ -1,110 +0,0 @@ -// Code generated by capnpc-go. DO NOT EDIT. - -package sshlog - -import ( - capnp "zombiezen.com/go/capnproto2" - text "zombiezen.com/go/capnproto2/encoding/text" - schemas "zombiezen.com/go/capnproto2/schemas" -) - -type SessionLog struct{ capnp.Struct } - -// SessionLog_TypeID is the unique identifier for the type SessionLog. -const SessionLog_TypeID = 0xa13a07c504a5ab64 - -func NewSessionLog(s *capnp.Segment) (SessionLog, error) { - st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}) - return SessionLog{st}, err -} - -func NewRootSessionLog(s *capnp.Segment) (SessionLog, error) { - st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}) - return SessionLog{st}, err -} - -func ReadRootSessionLog(msg *capnp.Message) (SessionLog, error) { - root, err := msg.RootPtr() - return SessionLog{root.Struct()}, err -} - -func (s SessionLog) String() string { - str, _ := text.Marshal(0xa13a07c504a5ab64, s.Struct) - return str -} - -func (s SessionLog) Timestamp() (string, error) { - p, err := s.Struct.Ptr(0) - return p.Text(), err -} - -func (s SessionLog) HasTimestamp() bool { - p, err := s.Struct.Ptr(0) - return p.IsValid() || err != nil -} - -func (s SessionLog) TimestampBytes() ([]byte, error) { - p, err := s.Struct.Ptr(0) - return p.TextBytes(), err -} - -func (s SessionLog) SetTimestamp(v string) error { - return s.Struct.SetText(0, v) -} - -func (s SessionLog) Content() ([]byte, error) { - p, err := s.Struct.Ptr(1) - return []byte(p.Data()), err -} - -func (s SessionLog) HasContent() bool { - p, err := s.Struct.Ptr(1) - return p.IsValid() || err != nil -} - -func (s SessionLog) SetContent(v []byte) error { - return s.Struct.SetData(1, v) -} - -// SessionLog_List is a list of SessionLog. 
-type SessionLog_List struct{ capnp.List } - -// NewSessionLog creates a new list of SessionLog. -func NewSessionLog_List(s *capnp.Segment, sz int32) (SessionLog_List, error) { - l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}, sz) - return SessionLog_List{l}, err -} - -func (s SessionLog_List) At(i int) SessionLog { return SessionLog{s.List.Struct(i)} } - -func (s SessionLog_List) Set(i int, v SessionLog) error { return s.List.SetStruct(i, v.Struct) } - -func (s SessionLog_List) String() string { - str, _ := text.MarshalList(0xa13a07c504a5ab64, s.List) - return str -} - -// SessionLog_Promise is a wrapper for a SessionLog promised by a client call. -type SessionLog_Promise struct{ *capnp.Pipeline } - -func (p SessionLog_Promise) Struct() (SessionLog, error) { - s, err := p.Pipeline.Struct() - return SessionLog{s}, err -} - -const schema_8f43375162194466 = "x\xda\x120q`\x12d\x8dg`\x08dae\xfb" + - "\x9f\xb2z)\xcbQv\xab\x85\x0c\x82B\x8c\xff\xd3\\" + - "$\x93\x02\xcd\x9d\xfb\x19X\x99\xd8\x19\x18\x04E_\x09" + - "*\x82h\xd9r\x06\xc6\xff\xc5\xa9\xc5\xc5\x99\xf9y\xf1" + - "L9\xf9\xe9z\xc9\x89\x05y\x05V\xc1`!\xfe<" + - "\x9f\xfc\xf4\x00F\xc6@\x0ef\x16\x06\x06\x16F\x06\x06" + - "A\xcd \x06\x86@\x0df\xc6@\x13&FAFF" + - "\x11F\x90\xa0\xa1\x13\x03C\xa0\x0e3c\xa0\x05\x13\xe3" + - "\xff\x92\xcc\xdc\xd4\xe2\x92\xc4\\\x06\xc6\x02F\x1e\x06&" + - "F\x1e\x06\xc6\xfa\xe4\xfc\xbc\x92\xd4\xbc\x12F^\x06&" + - "F^\x06F@\x00\x00\x00\xff\xff\xdaK$\x1a" - -func init() { - schemas.Register(schema_8f43375162194466, - 0xa13a07c504a5ab64) -} diff --git a/sshlog/session_logger.go b/sshlog/session_logger.go deleted file mode 100644 index 1f4ab262..00000000 --- a/sshlog/session_logger.go +++ /dev/null @@ -1,71 +0,0 @@ -package sshlog - -import ( - "time" - - "github.com/cloudflare/cloudflared/logger" - capnp "zombiezen.com/go/capnproto2" - "zombiezen.com/go/capnproto2/pogs" -) - -// SessionLogger will buffer and write events to disk using capnp proto for session 
replay -type SessionLogger struct { - logger *Logger - encoder *capnp.Encoder -} - -type sessionLogData struct { - Timestamp string // The UTC timestamp of when the log occurred - Content []byte // The shell output -} - -// NewSessionLogger creates a new session logger by encapsulating a Logger object and writing capnp encoded messages to it -func NewSessionLogger(filename string, logger logger.Service, flushInterval time.Duration, maxFileSize int64) (*SessionLogger, error) { - l, err := NewLogger(filename, logger, flushInterval, maxFileSize) - if err != nil { - return nil, err - } - sessionLogger := &SessionLogger{ - logger: l, - encoder: capnp.NewEncoder(l), - } - return sessionLogger, nil -} - -// Writes to a log buffer. Implements the io.Writer interface. -func (l *SessionLogger) Write(p []byte) (n int, err error) { - return l.writeSessionLog(&sessionLogData{ - Timestamp: time.Now().UTC().Format(time.RFC3339), - Content: p, - }) -} - -// Close drains anything left in the buffer and cleans up any resources still -// in use. 
-func (l *SessionLogger) Close() error { - return l.logger.Close() -} - -func (l *SessionLogger) writeSessionLog(p *sessionLogData) (int, error) { - msg, seg, err := capnp.NewMessage(capnp.SingleSegment(nil)) - if err != nil { - return 0, err - } - log, err := NewRootSessionLog(seg) - if err != nil { - return 0, err - } - log.SetTimestamp(p.Timestamp) - log.SetContent(p.Content) - - if err := l.encoder.Encode(msg); err != nil { - return 0, err - } - return len(p.Content), nil -} - -func unmarshalSessionLog(s SessionLog) (*sessionLogData, error) { - p := new(sessionLogData) - err := pogs.Extract(p, SessionLog_TypeID, s.Struct) - return p, err -} diff --git a/sshlog/session_logger_test.go b/sshlog/session_logger_test.go deleted file mode 100644 index 60b4dac2..00000000 --- a/sshlog/session_logger_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package sshlog - -import ( - "os" - "testing" - "time" - - "github.com/cloudflare/cloudflared/logger" - capnp "zombiezen.com/go/capnproto2" -) - -const sessionLogFileName = "test-session-logger.log" - -func createSessionLogger(t *testing.T) *SessionLogger { - os.Remove(sessionLogFileName) - l := logger.NewOutputWriter(logger.NewMockWriteManager()) - logger, err := NewSessionLogger(sessionLogFileName, l, time.Millisecond, 1024) - if err != nil { - t.Fatal("couldn't create the logger!", err) - } - return logger -} - -func TestSessionLogWrite(t *testing.T) { - testStr := "hi" - logger := createSessionLogger(t) - defer func() { - os.Remove(sessionLogFileName) - }() - - logger.Write([]byte(testStr)) - logger.Close() - - f, err := os.Open(sessionLogFileName) - if err != nil { - t.Fatal("couldn't read the log file!", err) - } - defer f.Close() - - msg, err := capnp.NewDecoder(f).Decode() - if err != nil { - t.Fatal("couldn't read the capnp msg file!", err) - } - - sessionLog, err := ReadRootSessionLog(msg) - if err != nil { - t.Fatal("couldn't read the session log from the msg!", err) - } - - timeStr, err := sessionLog.Timestamp() - if err != 
nil { - t.Fatal("couldn't read the Timestamp field!", err) - } - - _, terr := time.Parse(time.RFC3339, timeStr) - if terr != nil { - t.Fatal("couldn't parse the Timestamp into the expected RFC3339 format", terr) - } - - data, err := sessionLog.Content() - if err != nil { - t.Fatal("couldn't read the Content field!", err) - } - - checkStr := string(data) - if checkStr != testStr { - t.Fatal("file data doesn't match!") - } -} diff --git a/sshserver/host_keys.go b/sshserver/host_keys.go deleted file mode 100644 index 8c16d013..00000000 --- a/sshserver/host_keys.go +++ /dev/null @@ -1,114 +0,0 @@ -//+build !windows - -package sshserver - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "crypto/rsa" - "crypto/x509" - "encoding/pem" - "fmt" - "io/ioutil" - "os" - "path/filepath" - - "github.com/gliderlabs/ssh" - "github.com/pkg/errors" -) - -const ( - rsaFilename = "ssh_host_rsa_key" - ecdsaFilename = "ssh_host_ecdsa_key" -) - -var defaultHostKeyDir = filepath.Join(".cloudflared", "host_keys") - -func (s *SSHProxy) configureHostKeys(hostKeyDir string) error { - if hostKeyDir == "" { - homeDir, err := os.UserHomeDir() - if err != nil { - return err - } - hostKeyDir = filepath.Join(homeDir, defaultHostKeyDir) - } - - if _, err := os.Stat(hostKeyDir); os.IsNotExist(err) { - if err := os.MkdirAll(hostKeyDir, 0755); err != nil { - return errors.Wrap(err, fmt.Sprintf("Error creating %s directory", hostKeyDir)) - } - } - - if err := s.configureECDSAKey(hostKeyDir); err != nil { - return err - } - - if err := s.configureRSAKey(hostKeyDir); err != nil { - return err - } - - return nil -} - -func (s *SSHProxy) configureRSAKey(basePath string) error { - keyPath := filepath.Join(basePath, rsaFilename) - if _, err := os.Stat(keyPath); os.IsNotExist(err) { - key, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return errors.Wrap(err, "Error generating RSA host key") - } - - privateKey := &pem.Block{ - Type: "RSA PRIVATE KEY", - Bytes: 
x509.MarshalPKCS1PrivateKey(key), - } - - if err = writePrivateKey(keyPath, privateKey); err != nil { - return err - } - - s.logger.Debugf("Created new RSA SSH host key: %s", keyPath) - } - if err := s.SetOption(ssh.HostKeyFile(keyPath)); err != nil { - return errors.Wrap(err, "Could not set SSH RSA host key") - } - return nil -} - -func (s *SSHProxy) configureECDSAKey(basePath string) error { - keyPath := filepath.Join(basePath, ecdsaFilename) - if _, err := os.Stat(keyPath); os.IsNotExist(err) { - key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return errors.Wrap(err, "Error generating ECDSA host key") - } - - keyBytes, err := x509.MarshalECPrivateKey(key) - if err != nil { - return errors.Wrap(err, "Error marshalling ECDSA key") - } - - privateKey := &pem.Block{ - Type: "EC PRIVATE KEY", - Bytes: keyBytes, - } - - if err = writePrivateKey(keyPath, privateKey); err != nil { - return err - } - - s.logger.Debugf("Created new ECDSA SSH host key: %s", keyPath) - } - if err := s.SetOption(ssh.HostKeyFile(keyPath)); err != nil { - return errors.Wrap(err, "Could not set SSH ECDSA host key") - } - return nil -} - -func writePrivateKey(keyPath string, privateKey *pem.Block) error { - if err := ioutil.WriteFile(keyPath, pem.EncodeToMemory(privateKey), 0600); err != nil { - return errors.Wrap(err, fmt.Sprintf("Error writing host key to %s", keyPath)) - } - return nil -} diff --git a/sshserver/preamble_test.go b/sshserver/preamble_test.go deleted file mode 100644 index 04fd447e..00000000 --- a/sshserver/preamble_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package sshserver - -import ( - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "testing" -) - -func TestHasPort(t *testing.T) { - type testCase struct { - input string - expectedOutput string - } - - tests := []testCase{ - {"localhost", "localhost:22"}, - {"other.addr:22", "other.addr:22"}, - {"[2001:db8::1]:8080", "[2001:db8::1]:8080"}, - {"[::1]", "[::1]:22"}, - 
{"2001:0db8:3c4d:0015:0000:0000:1a2f:1234", "[2001:0db8:3c4d:0015:0000:0000:1a2f:1234]:22"}, - {"::1", "[::1]:22"}, - } - - for _, test := range tests { - out, err := canonicalizeDest(test.input) - require.Nil(t, err) - assert.Equal(t, test.expectedOutput, out) - } -} diff --git a/sshserver/sshserver_unix.go b/sshserver/sshserver_unix.go deleted file mode 100644 index 98f86e5c..00000000 --- a/sshserver/sshserver_unix.go +++ /dev/null @@ -1,491 +0,0 @@ -//+build !windows - -package sshserver - -import ( - "crypto/ecdsa" - "crypto/elliptic" - "crypto/rand" - "encoding/binary" - "encoding/json" - "fmt" - "io" - "net" - "runtime" - "strings" - "time" - - "github.com/cloudflare/cloudflared/logger" - "github.com/cloudflare/cloudflared/sshgen" - "github.com/cloudflare/cloudflared/sshlog" - "github.com/gliderlabs/ssh" - "github.com/google/uuid" - "github.com/pkg/errors" - gossh "golang.org/x/crypto/ssh" -) - -const ( - auditEventStart = "session_start" - auditEventStop = "session_stop" - auditEventExec = "exec" - auditEventScp = "scp" - auditEventResize = "resize" - auditEventShell = "shell" - sshContextSessionID = "sessionID" - sshContextEventLogger = "eventLogger" - sshContextPreamble = "sshPreamble" - sshContextSSHClient = "sshClient" - SSHPreambleLength = 2 - defaultSSHPort = "22" -) - -type auditEvent struct { - Event string `json:"event,omitempty"` - EventType string `json:"event_type,omitempty"` - SessionID string `json:"session_id,omitempty"` - User string `json:"user,omitempty"` - Login string `json:"login,omitempty"` - Datetime string `json:"datetime,omitempty"` - Hostname string `json:"hostname,omitempty"` - Destination string `json:"destination,omitempty"` -} - -// sshConn wraps the incoming net.Conn and a cleanup function -// This is done to allow the outgoing SSH client to be retrieved and closed when the conn itself is closed. 
-type sshConn struct { - net.Conn - cleanupFunc func() -} - -// close calls the cleanupFunc before closing the conn -func (c sshConn) Close() error { - c.cleanupFunc() - return c.Conn.Close() -} - -type SSHProxy struct { - ssh.Server - hostname string - logger logger.Service - shutdownC chan struct{} - caCert ssh.PublicKey - logManager sshlog.Manager -} - -type SSHPreamble struct { - Destination string - JWT string -} - -// New creates a new SSHProxy and configures its host keys and authentication by the data provided -func New(logManager sshlog.Manager, logger logger.Service, version, localAddress, hostname, hostKeyDir string, shutdownC chan struct{}, idleTimeout, maxTimeout time.Duration) (*SSHProxy, error) { - sshProxy := SSHProxy{ - hostname: hostname, - logger: logger, - shutdownC: shutdownC, - logManager: logManager, - } - - sshProxy.Server = ssh.Server{ - Addr: localAddress, - MaxTimeout: maxTimeout, - IdleTimeout: idleTimeout, - Version: fmt.Sprintf("SSH-2.0-Cloudflare-Access_%s_%s", version, runtime.GOOS), - PublicKeyHandler: sshProxy.proxyAuthCallback, - ConnCallback: sshProxy.connCallback, - ChannelHandlers: map[string]ssh.ChannelHandler{ - "default": sshProxy.channelHandler, - }, - } - - if err := sshProxy.configureHostKeys(hostKeyDir); err != nil { - return nil, err - } - - return &sshProxy, nil -} - -// Start the SSH proxy listener to start handling SSH connections from clients -func (s *SSHProxy) Start() error { - s.logger.Infof("Starting SSH server at %s", s.Addr) - - go func() { - <-s.shutdownC - if err := s.Close(); err != nil { - s.logger.Errorf("Cannot close SSH server: %s", err) - } - }() - - return s.ListenAndServe() -} - -// proxyAuthCallback attempts to connect to ultimate SSH destination. If successful, it allows the incoming connection -// to connect to the proxy and saves the outgoing SSH client to the context. Otherwise, no connection to the -// the proxy is allowed. 
-func (s *SSHProxy) proxyAuthCallback(ctx ssh.Context, key ssh.PublicKey) bool { - client, err := s.dialDestination(ctx) - if err != nil { - return false - } - ctx.SetValue(sshContextSSHClient, client) - return true -} - -// connCallback reads the preamble sent from the proxy server and saves an audit event logger to the context. -// If any errors occur, the connection is terminated by returning nil from the callback. -func (s *SSHProxy) connCallback(ctx ssh.Context, conn net.Conn) net.Conn { - // AUTH-2050: This is a temporary workaround of a timing issue in the tunnel muxer to allow further testing. - // TODO: Remove this - time.Sleep(10 * time.Millisecond) - - preamble, err := s.readPreamble(conn) - if err != nil { - if netErr, ok := err.(net.Error); ok && netErr.Timeout() { - s.logger.Info("Could not establish session. Client likely does not have --destination set and is using old-style ssh config") - } else if err != io.EOF { - s.logger.Errorf("failed to read SSH preamble: %s", err) - } - return nil - } - ctx.SetValue(sshContextPreamble, preamble) - - logger, sessionID, err := s.auditLogger() - if err != nil { - s.logger.Errorf("failed to configure logger: %s", err) - return nil - } - ctx.SetValue(sshContextEventLogger, logger) - ctx.SetValue(sshContextSessionID, sessionID) - - // attempts to retrieve and close the outgoing ssh client when the incoming conn is closed. - // If no client exists, the conn is being closed before the PublicKeyCallback was called (where the client is created). 
- cleanupFunc := func() { - client, ok := ctx.Value(sshContextSSHClient).(*gossh.Client) - if ok && client != nil { - client.Close() - } - } - - return sshConn{conn, cleanupFunc} -} - -// channelHandler proxies incoming and outgoing SSH traffic back and forth over an SSH Channel -func (s *SSHProxy) channelHandler(srv *ssh.Server, conn *gossh.ServerConn, newChan gossh.NewChannel, ctx ssh.Context) { - if newChan.ChannelType() != "session" && newChan.ChannelType() != "direct-tcpip" { - msg := fmt.Sprintf("channel type %s is not supported", newChan.ChannelType()) - s.logger.Info(msg) - if err := newChan.Reject(gossh.UnknownChannelType, msg); err != nil { - s.logger.Errorf("Error rejecting SSH channel: %s", err) - } - return - } - - localChan, localChanReqs, err := newChan.Accept() - if err != nil { - s.logger.Errorf("Failed to accept session channel: %s", err) - return - } - defer localChan.Close() - - // client will be closed when the sshConn is closed - client, ok := ctx.Value(sshContextSSHClient).(*gossh.Client) - if !ok { - s.logger.Error("Could not retrieve client from context") - return - } - - remoteChan, remoteChanReqs, err := client.OpenChannel(newChan.ChannelType(), newChan.ExtraData()) - if err != nil { - s.logger.Errorf("Failed to open remote channel: %s", err) - return - } - - defer remoteChan.Close() - - // Proxy ssh traffic back and forth between client and destination - s.proxyChannel(localChan, remoteChan, localChanReqs, remoteChanReqs, conn, ctx) -} - -// proxyChannel couples two SSH channels and proxies SSH traffic and channel requests back and forth. 
-func (s *SSHProxy) proxyChannel(localChan, remoteChan gossh.Channel, localChanReqs, remoteChanReqs <-chan *gossh.Request, conn *gossh.ServerConn, ctx ssh.Context) { - done := make(chan struct{}, 2) - go func() { - if _, err := io.Copy(localChan, remoteChan); err != nil { - s.logger.Errorf("remote to local copy error: %s", err) - } - done <- struct{}{} - }() - go func() { - if _, err := io.Copy(remoteChan, localChan); err != nil { - s.logger.Errorf("local to remote copy error: %s", err) - } - done <- struct{}{} - }() - - // stderr streams are used non-pty sessions since they have distinct IO streams. - remoteStderr := remoteChan.Stderr() - localStderr := localChan.Stderr() - go func() { - if _, err := io.Copy(remoteStderr, localStderr); err != nil { - s.logger.Errorf("stderr local to remote copy error: %s", err) - } - }() - go func() { - if _, err := io.Copy(localStderr, remoteStderr); err != nil { - s.logger.Errorf("stderr remote to local copy error: %s", err) - } - }() - - s.logAuditEvent(conn, "", auditEventStart, ctx) - defer s.logAuditEvent(conn, "", auditEventStop, ctx) - - // Proxy channel requests - for { - select { - case req := <-localChanReqs: - if req == nil { - return - } - if err := s.forwardChannelRequest(remoteChan, req); err != nil { - s.logger.Errorf("Failed to forward request: %s", err) - return - } - - s.logChannelRequest(req, conn, ctx) - - case req := <-remoteChanReqs: - if req == nil { - return - } - if err := s.forwardChannelRequest(localChan, req); err != nil { - s.logger.Errorf("Failed to forward request: %s", err) - return - } - case <-done: - return - } - } -} - - -// readPreamble reads a preamble from the SSH connection before any SSH traffic is sent. -// This preamble is a JSON encoded struct containing the users JWT and ultimate destination. -// The first 4 bytes contain the length of the preamble which follows immediately. 
-func (s *SSHProxy) readPreamble(conn net.Conn) (*SSHPreamble, error) { - // Set conn read deadline while reading preamble to prevent hangs if preamble wasnt sent. - if err := conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond)); err != nil { - return nil, errors.Wrap(err, "failed to set conn deadline") - } - defer func() { - if err := conn.SetReadDeadline(time.Time{}); err != nil { - s.logger.Errorf("Failed to unset conn read deadline: %s", err) - } - }() - - size := make([]byte, SSHPreambleLength) - if _, err := io.ReadFull(conn, size); err != nil { - return nil, err - } - payloadLength := binary.BigEndian.Uint16(size) - payload := make([]byte, payloadLength) - if _, err := io.ReadFull(conn, payload); err != nil { - return nil, err - } - - var preamble SSHPreamble - err := json.Unmarshal(payload, &preamble) - if err != nil { - return nil, err - } - - preamble.Destination, err = canonicalizeDest(preamble.Destination) - if err != nil { - return nil, err - } - return &preamble, nil -} - -// canonicalizeDest adds a default port if one doesnt exist -func canonicalizeDest(dest string) (string, error) { - _, _, err := net.SplitHostPort(dest) - // if host and port are split without error, a port exists. - if err != nil { - addrErr, ok := err.(*net.AddrError) - if !ok { - return "", err - } - // If the port is missing, append it. - if addrErr.Err == "missing port in address" { - return fmt.Sprintf("%s:%s", dest, defaultSSHPort), nil - } - - // If there are too many colons and address is IPv6, wrap in brackets and append port. 
Otherwise invalid address - ip := net.ParseIP(dest) - if addrErr.Err == "too many colons in address" && ip != nil && ip.To4() == nil { - return fmt.Sprintf("[%s]:%s", dest, defaultSSHPort), nil - } - return "", addrErr - } - - return dest, nil -} - -// dialDestination creates a new SSH client and dials the destination server -func (s *SSHProxy) dialDestination(ctx ssh.Context) (*gossh.Client, error) { - preamble, ok := ctx.Value(sshContextPreamble).(*SSHPreamble) - if !ok { - msg := "failed to retrieve SSH preamble from context" - s.logger.Error(msg) - return nil, errors.New(msg) - } - - signer, err := s.genSSHSigner(preamble.JWT) - if err != nil { - s.logger.Errorf("Failed to generate signed short lived cert: %s", err) - return nil, err - } - s.logger.Debugf("Short lived certificate for %s connecting to %s:\n\n%s", ctx.User(), preamble.Destination, gossh.MarshalAuthorizedKey(signer.PublicKey())) - - clientConfig := &gossh.ClientConfig{ - User: ctx.User(), - // AUTH-2103 TODO: proper host key check - HostKeyCallback: gossh.InsecureIgnoreHostKey(), - Auth: []gossh.AuthMethod{gossh.PublicKeys(signer)}, - ClientVersion: ctx.ServerVersion(), - } - - client, err := gossh.Dial("tcp", preamble.Destination, clientConfig) - if err != nil { - s.logger.Errorf("Failed to connect to destination SSH server: %s", err) - return nil, err - } - return client, nil -} - -// Generates a key pair and sends public key to get signed by CA -func (s *SSHProxy) genSSHSigner(jwt string) (gossh.Signer, error) { - key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) - if err != nil { - return nil, errors.Wrap(err, "failed to generate ecdsa key pair") - } - - pub, err := gossh.NewPublicKey(&key.PublicKey) - if err != nil { - return nil, errors.Wrap(err, "failed to convert ecdsa public key to SSH public key") - } - - pubBytes := gossh.MarshalAuthorizedKey(pub) - signedCertBytes, err := sshgen.SignCert(jwt, string(pubBytes)) - if err != nil { - return nil, errors.Wrap(err, "failed to 
retrieve cert from SSHCAAPI") - } - - signedPub, _, _, _, err := gossh.ParseAuthorizedKey([]byte(signedCertBytes)) - if err != nil { - return nil, errors.Wrap(err, "failed to parse SSH public key") - } - - cert, ok := signedPub.(*gossh.Certificate) - if !ok { - return nil, errors.Wrap(err, "failed to assert public key as certificate") - } - signer, err := gossh.NewSignerFromKey(key) - if err != nil { - return nil, errors.Wrap(err, "failed to create signer") - } - - certSigner, err := gossh.NewCertSigner(cert, signer) - if err != nil { - return nil, errors.Wrap(err, "failed to create cert signer") - } - return certSigner, nil -} - -// forwardChannelRequest sends request req to SSH channel sshChan, waits for reply, and sends the reply back. -func (s *SSHProxy) forwardChannelRequest(sshChan gossh.Channel, req *gossh.Request) error { - reply, err := sshChan.SendRequest(req.Type, req.WantReply, req.Payload) - if err != nil { - return errors.Wrap(err, "Failed to send request") - } - if err := req.Reply(reply, nil); err != nil { - return errors.Wrap(err, "Failed to reply to request") - } - return nil -} - -// logChannelRequest creates an audit log for different types of channel requests -func (s *SSHProxy) logChannelRequest(req *gossh.Request, conn *gossh.ServerConn, ctx ssh.Context) { - var eventType string - var event string - switch req.Type { - case "exec": - var payload struct{ Value string } - if err := gossh.Unmarshal(req.Payload, &payload); err != nil { - s.logger.Errorf("Failed to unmarshal channel request payload: %s:%s with error: %s", req.Type, req.Payload, err) - } - event = payload.Value - - eventType = auditEventExec - if strings.HasPrefix(string(req.Payload), "scp") { - eventType = auditEventScp - } - case "shell": - eventType = auditEventShell - case "window-change": - eventType = auditEventResize - default: - return - } - s.logAuditEvent(conn, event, eventType, ctx) -} - -func (s *SSHProxy) auditLogger() (io.WriteCloser, string, error) { - sessionUUID, 
err := uuid.NewRandom() - if err != nil { - return nil, "", errors.Wrap(err, "failed to create sessionID") - } - sessionID := sessionUUID.String() - - writer, err := s.logManager.NewLogger(fmt.Sprintf("%s-event.log", sessionID), s.logger) - if err != nil { - return nil, "", errors.Wrap(err, "failed to create logger") - } - return writer, sessionID, nil -} - -func (s *SSHProxy) logAuditEvent(conn *gossh.ServerConn, event, eventType string, ctx ssh.Context) { - sessionID, sessionIDOk := ctx.Value(sshContextSessionID).(string) - writer, writerOk := ctx.Value(sshContextEventLogger).(io.WriteCloser) - if !writerOk || !sessionIDOk { - s.logger.Error("Failed to retrieve audit logger from context") - return - } - - var destination string - preamble, ok := ctx.Value(sshContextPreamble).(*SSHPreamble) - if ok { - destination = preamble.Destination - } else { - s.logger.Error("Failed to retrieve SSH preamble from context") - } - - ae := auditEvent{ - Event: event, - EventType: eventType, - SessionID: sessionID, - User: conn.User(), - Login: conn.User(), - Datetime: time.Now().UTC().Format(time.RFC3339), - Hostname: s.hostname, - Destination: destination, - } - data, err := json.Marshal(&ae) - if err != nil { - s.logger.Errorf("Failed to marshal audit event. 
malformed audit object: %s", err) - return - } - line := string(data) + "\n" - if _, err := writer.Write([]byte(line)); err != nil { - s.logger.Errorf("Failed to write audit event: %s", err) - } -} diff --git a/sshserver/sshserver_windows.go b/sshserver/sshserver_windows.go deleted file mode 100644 index 7b338417..00000000 --- a/sshserver/sshserver_windows.go +++ /dev/null @@ -1,29 +0,0 @@ -//+build windows - -package sshserver - -import ( - "errors" - - "time" - - "github.com/cloudflare/cloudflared/logger" - "github.com/cloudflare/cloudflared/sshlog" -) - -const SSHPreambleLength = 2 - -type SSHServer struct{} - -type SSHPreamble struct { - Destination string - JWT string -} - -func New(_ sshlog.Manager, _ logger.Service, _, _, _, _ string, _ chan struct{}, _, _ time.Duration) (*SSHServer, error) { - return nil, errors.New("cloudflared ssh server is not supported on windows") -} - -func (s *SSHServer) Start() error { - return errors.New("cloudflared ssh server is not supported on windows") -} diff --git a/sshserver/testdata/ca b/sshserver/testdata/ca deleted file mode 100644 index 5e3f830c..00000000 --- a/sshserver/testdata/ca +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN OPENSSH PRIVATE KEY----- -b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn -NhAAAAAwEAAQAAAQEA0c6EklYvC9B041qEGWDNuot6G4tTVm9LCQC0vA+v2n25ru9CINV6 -8IljmXBORXBwfG6PdLhg0SEabZUbsNX5WrIVbGovcghKS6GRsqI5+Quhm+o8eG042JE/hB -oYdZ19TcMEyPOGzHsx0U/BSN9ZJWVCxqN51iI6qyhz9f6jlX2LQBFEvXlhxgF3owBEf8UC -Zt/UvbZdmeeyKNQElPmiVLIJEAPCueECp7a2mjCiP3zqjDvSeeGk4CelB/1qZZ4V2n7fvb -HZjAB5JJs4KXs5o8KgvQnqgQMxiLFZ4PATt4+mxEzh4JymppbqJOo2rYwOA3TAIEWWtYRV -/ZKJ0AyhhQAAA8gciO8XHIjvFwAAAAdzc2gtcnNhAAABAQDRzoSSVi8L0HTjWoQZYM26i3 -obi1NWb0sJALS8D6/afbmu70Ig1XrwiWOZcE5FcHB8bo90uGDRIRptlRuw1flashVsai9y -CEpLoZGyojn5C6Gb6jx4bTjYkT+EGhh1nX1NwwTI84bMezHRT8FI31klZULGo3nWIjqrKH -P1/qOVfYtAEUS9eWHGAXejAER/xQJm39S9tl2Z57Io1ASU+aJUsgkQA8K54QKntraaMKI/ 
-fOqMO9J54aTgJ6UH/WplnhXaft+9sdmMAHkkmzgpezmjwqC9CeqBAzGIsVng8BO3j6bETO -HgnKamluok6jatjA4DdMAgRZa1hFX9konQDKGFAAAAAwEAAQAAAQEApVzGdKhk8ETevAst -rurze6JPHcKUbr3NQE1EJi2fBvCtF0oQrtxTx54h2GAB8Q0MO6bQfsiL1ojm0ZQCfUBJBs -jxxb9zoccS98Vilo7ybm5SdBcMjkZX1am1jCMdQCZfCpk4/kGi7yvyOe1IhG01UBodpX5X -mwTjhN+fdjW7LSiW6cKPClN49CZKgmtvI27FCt+/TtMzdCXOiJxJ4yZCzCRhSgssV0gWI1 -0VJr/MHirKUvv/qCLAuOBxIr9UgdduRZUpNX+KS2rfhFEbjnUqc/57aAakpQmuPB5I+s9G -DnrF0HSHpq7u1XC1SvYlnFBN/0A7Hw/MX2SaBFH7mc9AAQAAAIAFuTHr6O8tCvWEawfxC0 -qiAPQ+Yy1vthq5uewmuQujMutUnc9JAUl32PdU1DbS7APC1Dg9XL7SyAB6A+ZpRJRAKgCY -SneAKE6hOytH+yM206aekrz6VuZiSpBqpfEqDibVAaZIO8sv/9dtZd6kWemxNErPQoKJey -Z7/cuWUWQovAAAAIEA6ugIlVj1irPmElyCCt5YfPv2x8Dl54ELoP/WsffsrPHNQog64hFd -ahD7Wq63TA566bN85fkx8OVU5TbbEQmkHgOEV6nDRY2YsBSqIOblA/KehtfdUIqZB0iNBh -Gn6TV/z6HwnSR3gKv4b66Gveek6LfRAG3mbsLCgyRAbYgn6YUAAACBAOSlf+n1eh6yjtvF -Zecq3Zslj7O8cUs17PQx4vQ7sXNCFrIZdevWPIn9sVrt7/hsTrXunDz6eXCeclB35KZe3H -WPVjRoD+xnr5+sXx2qXOnKCR0LdFybso6IR5bXAI6DNSNfP7D9LPEQ+R73Jk0jPuLYzocS -iM89KZiuGpzr01gBAAAAEW1pa2VAQzAyWTUwVEdKR0g4AQ== ------END OPENSSH PRIVATE KEY----- diff --git a/sshserver/testdata/ca.pub b/sshserver/testdata/ca.pub deleted file mode 100644 index 18c338d5..00000000 --- a/sshserver/testdata/ca.pub +++ /dev/null @@ -1 +0,0 @@ -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDRzoSSVi8L0HTjWoQZYM26i3obi1NWb0sJALS8D6/afbmu70Ig1XrwiWOZcE5FcHB8bo90uGDRIRptlRuw1flashVsai9yCEpLoZGyojn5C6Gb6jx4bTjYkT+EGhh1nX1NwwTI84bMezHRT8FI31klZULGo3nWIjqrKHP1/qOVfYtAEUS9eWHGAXejAER/xQJm39S9tl2Z57Io1ASU+aJUsgkQA8K54QKntraaMKI/fOqMO9J54aTgJ6UH/WplnhXaft+9sdmMAHkkmzgpezmjwqC9CeqBAzGIsVng8BO3j6bETOHgnKamluok6jatjA4DdMAgRZa1hFX9konQDKGF mike@C02Y50TGJGH8 diff --git a/sshserver/testdata/id_rsa b/sshserver/testdata/id_rsa deleted file mode 100644 index a8923737..00000000 --- a/sshserver/testdata/id_rsa +++ /dev/null @@ -1,49 +0,0 @@ ------BEGIN OPENSSH PRIVATE KEY----- -b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn 
-NhAAAAAwEAAQAAAgEA60Kneo87qPsh+zErWFl7vx93c7fyTxbZ9lUNqafgXy/BLOCc/nQS -McosVSLsQrbHlhYzfmZEhTiubmuYUrHchmsn1ml1HIqP8T5aDgtNbLqYnS4H5oO4Sj1+XH -lQtU7n7zHXgca9SnMWt1Fhkx1mvkeiOKs0eq7hV2TuIZxfmbYfIVvJGwrL0uWzbSEE1gvx -gTXZHxEChIQyrNviljgi4u2MD/cIi6KMeYUnaTL1FxO9G4GIFiy7ueHRwOZPIFHgYm+Vrt -X7XafSF0///zCrC63zzWt/6A06hFepOz2VXvm7SdckaR7qMXAb7kipsc0+dKk9ggU7Fqpx -ZY5cVeZo9RlRVhRXGDy7mABA/FMwvv+qYCgJ3nlZbdKbaiPLQu8ScTlJ9sMI06/ZiEY04b -meZ0ASM52gaDGjrFbbnuHNf5XV/oreEUhtCrryFnoIxmKgHznGjZ55q77FtTHnrAKFmKFP -11s3MLIX9o4RgtriOtl4KenkIfUumgtrwY/UGjOaOQUOrVH1am54wkUiVEF0Qd3AD8KCl/ -l/xT5+t6cOspZ9GIhwa2NBmRjN/wVGp+Yrb08Re3kxPCX9bs5iLe+kHN0vuFr7RDo+eUoi -SPhWl6FUqx2W9NZqekmEgKn3oKrfbGaMH1VLkaKWlzQ4xJzP0iadQbIXGryLEYASydemZt -sAAAdQ/ovjxf6L48UAAAAHc3NoLXJzYQAAAgEA60Kneo87qPsh+zErWFl7vx93c7fyTxbZ -9lUNqafgXy/BLOCc/nQSMcosVSLsQrbHlhYzfmZEhTiubmuYUrHchmsn1ml1HIqP8T5aDg -tNbLqYnS4H5oO4Sj1+XHlQtU7n7zHXgca9SnMWt1Fhkx1mvkeiOKs0eq7hV2TuIZxfmbYf -IVvJGwrL0uWzbSEE1gvxgTXZHxEChIQyrNviljgi4u2MD/cIi6KMeYUnaTL1FxO9G4GIFi -y7ueHRwOZPIFHgYm+VrtX7XafSF0///zCrC63zzWt/6A06hFepOz2VXvm7SdckaR7qMXAb -7kipsc0+dKk9ggU7FqpxZY5cVeZo9RlRVhRXGDy7mABA/FMwvv+qYCgJ3nlZbdKbaiPLQu -8ScTlJ9sMI06/ZiEY04bmeZ0ASM52gaDGjrFbbnuHNf5XV/oreEUhtCrryFnoIxmKgHznG -jZ55q77FtTHnrAKFmKFP11s3MLIX9o4RgtriOtl4KenkIfUumgtrwY/UGjOaOQUOrVH1am -54wkUiVEF0Qd3AD8KCl/l/xT5+t6cOspZ9GIhwa2NBmRjN/wVGp+Yrb08Re3kxPCX9bs5i -Le+kHN0vuFr7RDo+eUoiSPhWl6FUqx2W9NZqekmEgKn3oKrfbGaMH1VLkaKWlzQ4xJzP0i -adQbIXGryLEYASydemZtsAAAADAQABAAACABUYzBYEhDAaHSj+dsmcdKll8/tPko4fGXqq -k+gT4t4GVUdl+Q4kcIFAhQs5b4BoDava39FE8H4V4CaMxYMc6g6vy0nB+TuO/Wt/0OmTf+ -TxMsBdoV29kCgwLYWzZ1Zq9geQK6g6nzzu5ymXRa3ApDcKC3UTfUhHKHQC3AvtjvEk0NPX -/EfNhwuph5aQsHNVbNnOb2MGznf9tuGjckVQUWiSLs47s+t5rykylJ8tb6cbIQk3a3G5nz -gDFSE8Rfo6/Wk2YnDkRX9XjlKC3Q0QWzZX6hYQvs6baRT3G3jxg9SZhn8PqPc4S34VdJvA -rl8AbcpeZuKi/3J/5F1cD9GwMNcl4gM87piF20/r9mMvC4zBAEgyF8WBi4OjSu0+ccsEsb -GSpxKK04OPTB7p8mLJ8hQUiREg5OuPEEcAoDSuHgdliE7nDHzuImbpTcAZcWhkJaUdBWI6 
-qcnGPARzxAOmuzkY8Gq0MtcWge5QxnLWJyrfy43M984Cvxql/maLUij4eTbMDDwV7Qx30V -P2tJp5+hOnitRwB6cQIg5N7/cTQdJ6eiFYuw0v3IfHjYmaolY8F3u38Zv2PPk50CorPRDG -esx0a9Elm2UKPb145MtHGZtLH2mayRnDjnxr25iLwgokI06tCLCNvbkYLA7wVpJn81eKmZ -tQBtbfqBSiDiLjCrehAAABAQDh8vmgPR95Kx1DeBxzMSja7TStP5V58JcUvLP4dAN6zqtt -rMuKDfJRSIVYGkU8vXlME5E6phWP7R5lYY7+kLDbeZrYQg1AxkK8y4fUYkCLBuEcCjzWDK -oqZQNskk4urbCdBIP6AR7d/LMCHBb9rk2iOuUeos6JHRKbPGP1lvH3hLkbH9CA0F41sz86 -JFg6u/XaRQ2CyhS7y7SQ8dmaANGz9LGdIRqIoZ8Hfht8t1VRbM9fzSb3xoxUItbHpk9R9g -GZsHSryi7AtRmHt0uBrWIv6RbIY0epCbjdCLvHflbkPgwBM7UndgkOSIwQ4SQF8Fs+e9/r -hV05h0Y81vd1RZvOAAABAQD5EgW3SpmYzeMmiP7MKkfIlbZtwVmRu4anTzWxlk5pJ9GXnC -QoInULCipWAOeJbuLIgRWLU4VzhOUbYLNKQPXECARfgoto2VXoXZZ2q2O4aXaCpeyU6nE8 -VKbp4nU1jEg5hWB3PRwZ8Pzs4A93/9mrpVzLmCT+LW9Rlnp6tTpqcUKGugg8vr64SSgqnV -ZFyQgHgw+ZGOG9w714urS3U97WNTeHXAs0p2YBOu5XW3JQ3jkRo7YyZF3+TtBxbgfHRZfH -O2mFcMBD3Sn4t+LAbgnLye3S2/WZf/gQwdVB7BgrVqguzQ2hGoOxNiwadkIDsxb6r/u3n6 -2lScpHFDS0WnpRAAABAQDxzkV52VX6wAWkQe/2KFH9wTG0XFANmZUnnTPR8wd+b9E7HIr0 -Mdd8iAHOhLRvTy8mih53GGBptXK7GdABMZtkqDErbXhuC8xbi9uRLEHiRe/oBfWr8vYIZY -awiw3/EqxaTv0HBMicdr2S31Bs2/mjrVuJH0wAaI9ueQnZizzjgWuzeNZMWq1qk0akUUdm -PDVd58yBkt8lKlkOG0LJAn6JEG9oH9XiTFShHzu1dQmoC2bKVHdxL8WCcYFVtmyoMRcLZq -u6d4nyKha02cYZB5hM3VcizJI5HY/A+H3fBkRR0hXgkU5R89w+8x9VSJkNVx+JGC7ziK4a -kUjfOmR5WBdrAAAAE3Rlc3RAY2xvdWRmbGFyZS5jb20BAgMEBQYH ------END OPENSSH PRIVATE KEY----- diff --git a/sshserver/testdata/id_rsa-cert.pub b/sshserver/testdata/id_rsa-cert.pub deleted file mode 100644 index 961868a2..00000000 --- a/sshserver/testdata/id_rsa-cert.pub +++ /dev/null @@ -1 +0,0 @@ -ssh-rsa-cert-v01@openssh.com 
AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgOsuFqKdzp/nC3wQfKVJBdHa8axtGryKplPkDjdSXT4kAAAADAQABAAACAQDrQqd6jzuo+yH7MStYWXu/H3dzt/JPFtn2VQ2pp+BfL8Es4Jz+dBIxyixVIuxCtseWFjN+ZkSFOK5ua5hSsdyGayfWaXUcio/xPloOC01supidLgfmg7hKPX5ceVC1TufvMdeBxr1Kcxa3UWGTHWa+R6I4qzR6ruFXZO4hnF+Zth8hW8kbCsvS5bNtIQTWC/GBNdkfEQKEhDKs2+KWOCLi7YwP9wiLoox5hSdpMvUXE70bgYgWLLu54dHA5k8gUeBib5Wu1ftdp9IXT///MKsLrfPNa3/oDTqEV6k7PZVe+btJ1yRpHuoxcBvuSKmxzT50qT2CBTsWqnFljlxV5mj1GVFWFFcYPLuYAED8UzC+/6pgKAneeVlt0ptqI8tC7xJxOUn2wwjTr9mIRjThuZ5nQBIznaBoMaOsVtue4c1/ldX+it4RSG0KuvIWegjGYqAfOcaNnnmrvsW1MeesAoWYoU/XWzcwshf2jhGC2uI62Xgp6eQh9S6aC2vBj9QaM5o5BQ6tUfVqbnjCRSJUQXRB3cAPwoKX+X/FPn63pw6yln0YiHBrY0GZGM3/BUan5itvTxF7eTE8Jf1uzmIt76Qc3S+4WvtEOj55SiJI+FaXoVSrHZb01mp6SYSAqfegqt9sZowfVUuRopaXNDjEnM/SJp1BshcavIsRgBLJ16Zm2wAAAAAAAAAAAAAAAQAAAA10ZXN0VXNlckB0ZXN0AAAADAAAAAh0ZXN0VXNlcgAAAAAAAAAA//////////8AAAAAAAAAggAAABVwZXJtaXQtWDExLWZvcndhcmRpbmcAAAAAAAAAF3Blcm1pdC1hZ2VudC1mb3J3YXJkaW5nAAAAAAAAABZwZXJtaXQtcG9ydC1mb3J3YXJkaW5nAAAAAAAAAApwZXJtaXQtcHR5AAAAAAAAAA5wZXJtaXQtdXNlci1yYwAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEA0c6EklYvC9B041qEGWDNuot6G4tTVm9LCQC0vA+v2n25ru9CINV68IljmXBORXBwfG6PdLhg0SEabZUbsNX5WrIVbGovcghKS6GRsqI5+Quhm+o8eG042JE/hBoYdZ19TcMEyPOGzHsx0U/BSN9ZJWVCxqN51iI6qyhz9f6jlX2LQBFEvXlhxgF3owBEf8UCZt/UvbZdmeeyKNQElPmiVLIJEAPCueECp7a2mjCiP3zqjDvSeeGk4CelB/1qZZ4V2n7fvbHZjAB5JJs4KXs5o8KgvQnqgQMxiLFZ4PATt4+mxEzh4JymppbqJOo2rYwOA3TAIEWWtYRV/ZKJ0AyhhQAAAQ8AAAAHc3NoLXJzYQAAAQC2lL+6JYTGOdz1zNnck6onrFcVpO2onCVAKP8HdLoCeH0/upIugaCocPKuzoURYEfiHQotviNeprE/2CyAroJ5VBdqWftEeHn3FFvBCQ1gwRQ7oci4C5n72t0vjWWE6WBylS0RqpJjr6EQ8a1vuwIqAQrEJPp2yNLjRH2WD7eicBh5f43VKOMr73DtyTh4xoF0C2sNBROudt58npTaYqRHQgoI25V/aCmuYBgM3wdAGcoEZGoSerMfhID7GcWkvemq2hF8mQsspG3zgnyQXk+ahagmefzxutDnr3KdrZ637La0/XwABvBZ9L4l5RiEilVI1Shl96F2qbBW2YZ64pUQ test@cloudflare.com diff --git a/sshserver/testdata/id_rsa.pub b/sshserver/testdata/id_rsa.pub deleted file mode 100644 index 842a259f..00000000 --- a/sshserver/testdata/id_rsa.pub +++ /dev/null 
@@ -1 +0,0 @@ -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDrQqd6jzuo+yH7MStYWXu/H3dzt/JPFtn2VQ2pp+BfL8Es4Jz+dBIxyixVIuxCtseWFjN+ZkSFOK5ua5hSsdyGayfWaXUcio/xPloOC01supidLgfmg7hKPX5ceVC1TufvMdeBxr1Kcxa3UWGTHWa+R6I4qzR6ruFXZO4hnF+Zth8hW8kbCsvS5bNtIQTWC/GBNdkfEQKEhDKs2+KWOCLi7YwP9wiLoox5hSdpMvUXE70bgYgWLLu54dHA5k8gUeBib5Wu1ftdp9IXT///MKsLrfPNa3/oDTqEV6k7PZVe+btJ1yRpHuoxcBvuSKmxzT50qT2CBTsWqnFljlxV5mj1GVFWFFcYPLuYAED8UzC+/6pgKAneeVlt0ptqI8tC7xJxOUn2wwjTr9mIRjThuZ5nQBIznaBoMaOsVtue4c1/ldX+it4RSG0KuvIWegjGYqAfOcaNnnmrvsW1MeesAoWYoU/XWzcwshf2jhGC2uI62Xgp6eQh9S6aC2vBj9QaM5o5BQ6tUfVqbnjCRSJUQXRB3cAPwoKX+X/FPn63pw6yln0YiHBrY0GZGM3/BUan5itvTxF7eTE8Jf1uzmIt76Qc3S+4WvtEOj55SiJI+FaXoVSrHZb01mp6SYSAqfegqt9sZowfVUuRopaXNDjEnM/SJp1BshcavIsRgBLJ16Zm2w== test@cloudflare.com diff --git a/sshserver/testdata/other_ca b/sshserver/testdata/other_ca deleted file mode 100644 index c79d6a2e..00000000 --- a/sshserver/testdata/other_ca +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN OPENSSH PRIVATE KEY----- -b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn -NhAAAAAwEAAQAAAQEAzBO7TXxbpk7sGQm/Wa29N/NFe5uuoEQGC5hxfihmcvVgeKeNKiSS -snxzCE1Y6SmNMoE4aQs92wtcn48GmxRwZSXbCqLq2CJrHfe9B2k3aPkJZpQkFMshcJGo7p -G0Vlo7dWAbYf99/YKddf290uLK7vxw9ty0pM1hXSXHNShv1b+bTQm/COMZ5jNsncjc1yBH -KGkFVHee9Dh4Z0xLlHipIyyNXXzI0RFYuHSNJz9GD310XQLIIroptr7+/7g6+sPPGsNlI+ -95OScba1/PQ2b/qy+KyIwNIMSd9ziJy5xnO7Vo3LrqQrza1Pkn2i29PljUcbc/F0hhXNIq -ITdNWwVqsQAAA8iKllTIipZUyAAAAAdzc2gtcnNhAAABAQDME7tNfFumTuwZCb9Zrb0380 -V7m66gRAYLmHF+KGZy9WB4p40qJJKyfHMITVjpKY0ygThpCz3bC1yfjwabFHBlJdsKourY -Imsd970HaTdo+QlmlCQUyyFwkajukbRWWjt1YBth/339gp11/b3S4sru/HD23LSkzWFdJc -c1KG/Vv5tNCb8I4xnmM2ydyNzXIEcoaQVUd570OHhnTEuUeKkjLI1dfMjREVi4dI0nP0YP -fXRdAsgiuim2vv7/uDr6w88aw2Uj73k5JxtrX89DZv+rL4rIjA0gxJ33OInLnGc7tWjcuu -pCvNrU+SfaLb0+WNRxtz8XSGFc0iohN01bBWqxAAAAAwEAAQAAAQAKEtNFEOVpQS4QUlXa -tGPJtj1wy4+EI7d0rRK1GoNsG0amzgZ+1Q1UuCXpe//uinmIy64gKUjlXhs1WRcHYqvlok -e8r6wN/Szybr8q9Xuht+FJ6fgZ+qjs6JPBKvoO5SdYNOVFIhpzABaLs3nCRiWkRFvDI8Pa 
-+rRap7m8mwFiOJtmdiIZYFxzw6xXwTsGCrWPKgTv3FKGZzXnCB9i7jC2vwT1MDYbcnzEH4 -Ba4dxI8bp6WWEX0biRIXj3jCtLb5gisNTSxdZs254Syh75HEXunSh2YO+yVSWQtZj19ewW -6Rb1Z3x5rVfXcgSkg7gZd9EpbckIIg6+MFSH3wdGW6atAAAAgQDFXiMuNd4ZYwdyhjlM5n -nFqQDXGgnwyNdiIqAapoqTdF5aZwNnbTU0fCFaDMLCQAHgntcgCEsW9A4HzDzYhOABKElv -j973vXWF165wFiZwuKSfroq/6JH6CiIcjiqpszbnqSOzy1hq913RWILS6e9yMjxRv8PUjm -E+IkcnfcFUwAAAAIEA+jwI3ICe8PGEIezV2tvQFeQy2Z2wGslu1yvqfTYEztSmtygns3wn -ZBb+cBXCnpqUCtznG7hZhq7I4m1I47BYznULwwFiBTVtBASG5wNP7zeVKTVZ4SKprze+Fe -I/nUZDJ5Q26um7eDbhvZ/n95GY+fucMVHoSBfX1wE16XBfp88AAACBANDHcgC4qP2oyOw/ -+p9HineMQd/ppG3fePe07jyZXLHLf0rByFveFgRAQ1m77O7FtP3fFKy3Y9nNy18LGq35ZK -Blsz2B23bO8NuffgAhchDG7KzKFXCo+AraIj5znp/znK5zIkaiiSOQaYywJ36EooYVpRtj -ep5ap6bBFDZ2e+V/AAAAEW1pa2VAQzAyWTUwVEdKR0g4AQ== ------END OPENSSH PRIVATE KEY----- diff --git a/sshserver/testdata/other_ca.pub b/sshserver/testdata/other_ca.pub deleted file mode 100644 index 7ba45831..00000000 --- a/sshserver/testdata/other_ca.pub +++ /dev/null @@ -1 +0,0 @@ -ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDME7tNfFumTuwZCb9Zrb0380V7m66gRAYLmHF+KGZy9WB4p40qJJKyfHMITVjpKY0ygThpCz3bC1yfjwabFHBlJdsKourYImsd970HaTdo+QlmlCQUyyFwkajukbRWWjt1YBth/339gp11/b3S4sru/HD23LSkzWFdJcc1KG/Vv5tNCb8I4xnmM2ydyNzXIEcoaQVUd570OHhnTEuUeKkjLI1dfMjREVi4dI0nP0YPfXRdAsgiuim2vv7/uDr6w88aw2Uj73k5JxtrX89DZv+rL4rIjA0gxJ33OInLnGc7tWjcuupCvNrU+SfaLb0+WNRxtz8XSGFc0iohN01bBWqx mike@C02Y50TGJGH8 diff --git a/tlsconfig/certreloader.go b/tlsconfig/certreloader.go index 1a43298f..9440f99f 100644 --- a/tlsconfig/certreloader.go +++ b/tlsconfig/certreloader.go @@ -8,9 +8,9 @@ import ( "runtime" "sync" - "github.com/cloudflare/cloudflared/logger" "github.com/getsentry/raven-go" "github.com/pkg/errors" + "github.com/rs/zerolog" "github.com/urfave/cli/v2" ) @@ -63,7 +63,7 @@ func (cr *CertReloader) LoadCert() error { return nil } -func LoadOriginCA(originCAPoolFilename string, logger logger.Service) (*x509.CertPool, error) { +func LoadOriginCA(originCAPoolFilename string, log 
*zerolog.Logger) (*x509.CertPool, error) { var originCustomCAPool []byte if originCAPoolFilename != "" { @@ -74,14 +74,14 @@ func LoadOriginCA(originCAPoolFilename string, logger logger.Service) (*x509.Cer } } - originCertPool, err := loadOriginCertPool(originCustomCAPool, logger) + originCertPool, err := loadOriginCertPool(originCustomCAPool, log) if err != nil { return nil, errors.Wrap(err, "error loading the certificate pool") } // Windows users should be notified that they can use the flag if runtime.GOOS == "windows" && originCAPoolFilename == "" { - logger.Infof("cloudflared does not support loading the system root certificate pool on Windows. Please use the --%s to specify it", OriginCAPoolFlag) + log.Info().Msgf("cloudflared does not support loading the system root certificate pool on Windows. Please use the --%s to specify it", OriginCAPoolFlag) } return originCertPool, nil @@ -148,9 +148,9 @@ func CreateTunnelConfig(c *cli.Context, serverName string) (*tls.Config, error) return tlsConfig, nil } -func loadOriginCertPool(originCAPoolPEM []byte, logger logger.Service) (*x509.CertPool, error) { +func loadOriginCertPool(originCAPoolPEM []byte, log *zerolog.Logger) (*x509.CertPool, error) { // Get the global pool - certPool, err := loadGlobalCertPool(logger) + certPool, err := loadGlobalCertPool(log) if err != nil { return nil, err } @@ -158,19 +158,19 @@ func loadOriginCertPool(originCAPoolPEM []byte, logger logger.Service) (*x509.Ce // Then, add any custom origin CA pool the user may have passed if originCAPoolPEM != nil { if !certPool.AppendCertsFromPEM(originCAPoolPEM) { - logger.Info("could not append the provided origin CA to the cloudflared certificate pool") + log.Info().Msg("could not append the provided origin CA to the cloudflared certificate pool") } } return certPool, nil } -func loadGlobalCertPool(logger logger.Service) (*x509.CertPool, error) { +func loadGlobalCertPool(log *zerolog.Logger) (*x509.CertPool, error) { // First, obtain the system 
certificate pool certPool, err := x509.SystemCertPool() if err != nil { if runtime.GOOS != "windows" { // See https://github.com/golang/go/issues/16736 - logger.Infof("error obtaining the system certificates: %s", err) + log.Info().Msgf("error obtaining the system certificates: %s", err) } certPool = x509.NewCertPool() } diff --git a/tunneldns/https_upstream.go b/tunneldns/https_upstream.go index b4c756fa..1b43fd23 100644 --- a/tunneldns/https_upstream.go +++ b/tunneldns/https_upstream.go @@ -11,9 +11,9 @@ import ( "net/url" "time" - "github.com/cloudflare/cloudflared/logger" "github.com/miekg/dns" "github.com/pkg/errors" + "github.com/rs/zerolog" "golang.org/x/net/http2" ) @@ -26,16 +26,16 @@ type UpstreamHTTPS struct { client *http.Client endpoint *url.URL bootstraps []string - logger logger.Service + log *zerolog.Logger } // NewUpstreamHTTPS creates a new DNS over HTTPS upstream from endpoint -func NewUpstreamHTTPS(endpoint string, bootstraps []string, logger logger.Service) (Upstream, error) { +func NewUpstreamHTTPS(endpoint string, bootstraps []string, log *zerolog.Logger) (Upstream, error) { u, err := url.Parse(endpoint) if err != nil { return nil, err } - return &UpstreamHTTPS{client: configureClient(u.Hostname()), endpoint: u, bootstraps: bootstraps, logger: logger}, nil + return &UpstreamHTTPS{client: configureClient(u.Hostname()), endpoint: u, bootstraps: bootstraps, log: log}, nil } // Exchange provides an implementation for the Upstream interface @@ -49,12 +49,12 @@ func (u *UpstreamHTTPS) Exchange(ctx context.Context, query *dns.Msg) (*dns.Msg, for _, bootstrap := range u.bootstraps { endpoint, client, err := configureBootstrap(bootstrap) if err != nil { - u.logger.Errorf("failed to configure boostrap upstream %s: %s", bootstrap, err) + u.log.Error().Msgf("failed to configure boostrap upstream %s: %s", bootstrap, err) continue } - msg, err := exchange(queryBuf, query.Id, endpoint, client, u.logger) + msg, err := exchange(queryBuf, query.Id, endpoint, 
client, u.log) if err != nil { - u.logger.Errorf("failed to connect to a boostrap upstream %s: %s", bootstrap, err) + u.log.Error().Msgf("failed to connect to a boostrap upstream %s: %s", bootstrap, err) continue } return msg, nil @@ -62,10 +62,10 @@ func (u *UpstreamHTTPS) Exchange(ctx context.Context, query *dns.Msg) (*dns.Msg, return nil, fmt.Errorf("failed to reach any bootstrap upstream: %v", u.bootstraps) } - return exchange(queryBuf, query.Id, u.endpoint, u.client, u.logger) + return exchange(queryBuf, query.Id, u.endpoint, u.client, u.log) } -func exchange(msg []byte, queryID uint16, endpoint *url.URL, client *http.Client, logger logger.Service) (*dns.Msg, error) { +func exchange(msg []byte, queryID uint16, endpoint *url.URL, client *http.Client, log *zerolog.Logger) (*dns.Msg, error) { // No content negotiation for now, use DNS wire format buf, backendErr := exchangeWireformat(msg, endpoint, client) if backendErr == nil { @@ -78,7 +78,7 @@ func exchange(msg []byte, queryID uint16, endpoint *url.URL, client *http.Client return response, nil } - logger.Errorf("failed to connect to an HTTPS backend %q: %s", endpoint, backendErr) + log.Error().Msgf("failed to connect to an HTTPS backend %q: %s", endpoint, backendErr) return nil, backendErr } @@ -128,14 +128,14 @@ func configureBootstrap(bootstrap string) (*url.URL, *http.Client, error) { // configureClient will configure a HTTPS client for upstream DoH requests func configureClient(hostname string) *http.Client { // Update TLS and HTTP client configuration - tls := &tls.Config{ServerName: hostname} + tlsConfig := &tls.Config{ServerName: hostname} transport := &http.Transport{ - TLSClientConfig: tls, + TLSClientConfig: tlsConfig, DisableCompression: true, MaxIdleConns: 1, Proxy: http.ProxyFromEnvironment, } - http2.ConfigureTransport(transport) + _ = http2.ConfigureTransport(transport) return &http.Client{ Timeout: defaultTimeout, diff --git a/tunneldns/tunnel.go b/tunneldns/tunnel.go index d18b96ab..9927ae3f 
100644 --- a/tunneldns/tunnel.go +++ b/tunneldns/tunnel.go @@ -16,6 +16,7 @@ import ( "github.com/coredns/coredns/plugin" "github.com/coredns/coredns/plugin/cache" "github.com/pkg/errors" + "github.com/rs/zerolog" "github.com/urfave/cli/v2" ) @@ -23,7 +24,7 @@ import ( type Listener struct { server *dnsserver.Server wg sync.WaitGroup - logger logger.Service + log *zerolog.Logger } func Command(hidden bool) *cli.Command { @@ -70,21 +71,18 @@ func Command(hidden bool) *cli.Command { // Run implements a foreground runner func Run(c *cli.Context) error { - logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) - if err != nil { - return cliutil.PrintLoggerSetupError("error setting up logger", err) - } + log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) metricsListener, err := net.Listen("tcp", c.String("metrics")) if err != nil { - logger.Fatalf("Failed to open the metrics listener: %s", err) + log.Fatal().Msgf("Failed to open the metrics listener: %s", err) } - go metrics.ServeMetrics(metricsListener, nil, nil, logger) + go metrics.ServeMetrics(metricsListener, nil, nil, log) - listener, err := CreateListener(c.String("address"), uint16(c.Uint("port")), c.StringSlice("upstream"), c.StringSlice("bootstrap"), logger) + listener, err := CreateListener(c.String("address"), uint16(c.Uint("port")), c.StringSlice("upstream"), c.StringSlice("bootstrap"), log) if err != nil { - logger.Errorf("Failed to create the listeners: %s", err) + log.Error().Msgf("Failed to create the listeners: %s", err) return err } @@ -92,7 +90,7 @@ func Run(c *cli.Context) error { readySignal := make(chan struct{}) err = listener.Start(readySignal) if err != nil { - logger.Errorf("Failed to start the listeners: %s", err) + log.Error().Msgf("Failed to start the listeners: %s", err) return listener.Stop() } <-readySignal @@ -106,7 +104,7 @@ func Run(c *cli.Context) error { // Shut down server err = listener.Stop() if err != nil { - logger.Errorf("failed to stop: 
%s", err) + log.Error().Msgf("failed to stop: %s", err) } return err } @@ -127,13 +125,13 @@ func createConfig(address string, port uint16, p plugin.Handler) *dnsserver.Conf // Start blocks for serving requests func (l *Listener) Start(readySignal chan struct{}) error { defer close(readySignal) - l.logger.Infof("Starting DNS over HTTPS proxy server on: %s", l.server.Address()) + l.log.Info().Msgf("Starting DNS over HTTPS proxy server on: %s", l.server.Address()) // Start UDP listener if udp, err := l.server.ListenPacket(); err == nil { l.wg.Add(1) go func() { - l.server.ServePacket(udp) + _ = l.server.ServePacket(udp) l.wg.Done() }() } else { @@ -145,7 +143,7 @@ func (l *Listener) Start(readySignal chan struct{}) error { if err == nil { l.wg.Add(1) go func() { - l.server.Serve(tcp) + _ = l.server.Serve(tcp) l.wg.Done() }() } @@ -164,12 +162,12 @@ func (l *Listener) Stop() error { } // CreateListener configures the server and bound sockets -func CreateListener(address string, port uint16, upstreams []string, bootstraps []string, logger logger.Service) (*Listener, error) { +func CreateListener(address string, port uint16, upstreams []string, bootstraps []string, log *zerolog.Logger) (*Listener, error) { // Build the list of upstreams upstreamList := make([]Upstream, 0) for _, url := range upstreams { - logger.Infof("Adding DNS upstream - url: %s", url) - upstream, err := NewUpstreamHTTPS(url, bootstraps, logger) + log.Info().Msgf("Adding DNS upstream - url: %s", url) + upstream, err := NewUpstreamHTTPS(url, bootstraps, log) if err != nil { return nil, errors.Wrap(err, "failed to create HTTPS upstream") } @@ -191,5 +189,5 @@ func CreateListener(address string, port uint16, upstreams []string, bootstraps return nil, err } - return &Listener{server: server, logger: logger}, nil + return &Listener{server: server, log: log}, nil } diff --git a/tunnelrpc/log.go b/tunnelrpc/log.go index 262c0a72..4774ec15 100644 --- a/tunnelrpc/log.go +++ b/tunnelrpc/log.go @@ -3,25 +3,25 
@@ package tunnelrpc import ( "context" - "github.com/cloudflare/cloudflared/logger" + "github.com/rs/zerolog" "golang.org/x/net/trace" "zombiezen.com/go/capnproto2/rpc" ) -// ConnLogger wraps a logrus *log.Entry for a connection. +// ConnLogger wraps a Zerolog Logger for a connection. type ConnLogger struct { - Entry logger.Service + Log *zerolog.Logger } func (c ConnLogger) Infof(ctx context.Context, format string, args ...interface{}) { - c.Entry.Infof(format, args...) + c.Log.Info().Msgf(format, args...) } func (c ConnLogger) Errorf(ctx context.Context, format string, args ...interface{}) { - c.Entry.Errorf(format, args...) + c.Log.Error().Msgf(format, args...) } -func ConnLog(log logger.Service) rpc.ConnOption { +func ConnLog(log *zerolog.Logger) rpc.ConnOption { return rpc.ConnLog(ConnLogger{log}) } diff --git a/tunnelrpc/logtransport.go b/tunnelrpc/logtransport.go index 1d290ef4..265bcc02 100644 --- a/tunnelrpc/logtransport.go +++ b/tunnelrpc/logtransport.go @@ -5,7 +5,7 @@ import ( "bytes" "context" - "github.com/cloudflare/cloudflared/logger" + "github.com/rs/zerolog" "zombiezen.com/go/capnproto2/encoding/text" "zombiezen.com/go/capnproto2/rpc" rpccapnp "zombiezen.com/go/capnproto2/std/capnp/rpc" @@ -13,33 +13,33 @@ import ( type transport struct { rpc.Transport - l logger.Service + log *zerolog.Logger } // NewTransportLogger creates a new logger that proxies messages to and from t and -// logs them to l. If l is nil, then the log package's default +// logs them to log. If log is nil, then the log package's default // logger is used. 
-func NewTransportLogger(l logger.Service, t rpc.Transport) rpc.Transport { - return &transport{Transport: t, l: l} +func NewTransportLogger(log *zerolog.Logger, t rpc.Transport) rpc.Transport { + return &transport{Transport: t, log: log} } func (t *transport) SendMessage(ctx context.Context, msg rpccapnp.Message) error { - t.l.Debugf("rpcconnect: tx %s", formatMsg(msg)) + t.log.Debug().Msgf("rpcconnect: tx %s", formatMsg(msg)) return t.Transport.SendMessage(ctx, msg) } func (t *transport) RecvMessage(ctx context.Context) (rpccapnp.Message, error) { msg, err := t.Transport.RecvMessage(ctx) if err != nil { - t.l.Debugf("rpcconnect: rx error: %s", err) + t.log.Debug().Msgf("rpcconnect: rx error: %s", err) return msg, err } - t.l.Debugf("rpcconnect: rx %s", formatMsg(msg)) + t.log.Debug().Msgf("rpcconnect: rx %s", formatMsg(msg)) return msg, nil } func formatMsg(m rpccapnp.Message) string { var buf bytes.Buffer - text.NewEncoder(&buf).Encode(0x91b79f1f808db032, m.Struct) + _ = text.NewEncoder(&buf).Encode(0x91b79f1f808db032, m.Struct) return buf.String() } diff --git a/tunnelrpc/pogs/auth_test.go b/tunnelrpc/pogs/auth_test.go index f30f3977..fcc217f5 100644 --- a/tunnelrpc/pogs/auth_test.go +++ b/tunnelrpc/pogs/auth_test.go @@ -102,14 +102,14 @@ func TestWhenToRefresh(t *testing.T) { func TestSerializeAuthenticationResponse(t *testing.T) { tests := []*AuthenticateResponse{ - &AuthenticateResponse{ + { Jwt: []byte("\xbd\xb2\x3d\xbc\x20\xe2\x8c\x98"), HoursUntilRefresh: 24, }, - &AuthenticateResponse{ + { PermanentErr: "bad auth", }, - &AuthenticateResponse{ + { RetryableErr: "bad connection", HoursUntilRefresh: 24, }, diff --git a/tunnelstore/client.go b/tunnelstore/client.go index 5ea2728b..2e9c9ff5 100644 --- a/tunnelstore/client.go +++ b/tunnelstore/client.go @@ -13,8 +13,7 @@ import ( "github.com/google/uuid" "github.com/pkg/errors" - - "github.com/cloudflare/cloudflared/logger" + "github.com/rs/zerolog" ) const ( @@ -199,7 +198,7 @@ type RESTClient struct { 
authToken string userAgent string client http.Client - logger logger.Service + log *zerolog.Logger } type baseEndpoints struct { @@ -209,7 +208,7 @@ type baseEndpoints struct { var _ Client = (*RESTClient)(nil) -func NewRESTClient(baseURL, accountTag, zoneTag, authToken, userAgent string, logger logger.Service) (*RESTClient, error) { +func NewRESTClient(baseURL, accountTag, zoneTag, authToken, userAgent string, log *zerolog.Logger) (*RESTClient, error) { if strings.HasSuffix(baseURL, "/") { baseURL = baseURL[:len(baseURL)-1] } @@ -235,7 +234,7 @@ func NewRESTClient(baseURL, accountTag, zoneTag, authToken, userAgent string, lo }, Timeout: defaultTimeout, }, - logger: logger, + log: log, }, nil } diff --git a/validation/validation.go b/validation/validation.go index f8a4e5b8..3a5bfa83 100644 --- a/validation/validation.go +++ b/validation/validation.go @@ -202,7 +202,7 @@ func ValidateHTTPService(originURL string, hostname string, transport http.Round secondRequest.Host = hostname resp, secondErr := client.Do(secondRequest) if secondErr == nil { // Worked this time--advise the user to switch protocols - resp.Body.Close() + _ = resp.Body.Close() return errors.Errorf( "%s doesn't seem to work over %s, but does seem to work over %s. Reason: %v. 
Consider changing the origin URL to %v", parsedURL.Host, diff --git a/vendor/github.com/rs/zerolog/.gitignore b/vendor/github.com/rs/zerolog/.gitignore new file mode 100644 index 00000000..8ebe58b1 --- /dev/null +++ b/vendor/github.com/rs/zerolog/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +tmp + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/rs/zerolog/.travis.yml b/vendor/github.com/rs/zerolog/.travis.yml new file mode 100644 index 00000000..70b67c96 --- /dev/null +++ b/vendor/github.com/rs/zerolog/.travis.yml @@ -0,0 +1,15 @@ +language: go +go: +- "1.7" +- "1.8" +- "1.9" +- "1.10" +- "1.11" +- "1.12" +- "master" +matrix: + allow_failures: + - go: "master" +script: + - go test -v -race -cpu=1,2,4 -bench . -benchmem ./... + - go test -v -tags binary_log -race -cpu=1,2,4 -bench . -benchmem ./... 
diff --git a/vendor/github.com/rs/zerolog/CNAME b/vendor/github.com/rs/zerolog/CNAME new file mode 100644 index 00000000..9ce57a6e --- /dev/null +++ b/vendor/github.com/rs/zerolog/CNAME @@ -0,0 +1 @@ +zerolog.io \ No newline at end of file diff --git a/vendor/github.com/rs/zerolog/LICENSE b/vendor/github.com/rs/zerolog/LICENSE new file mode 100644 index 00000000..677e07f7 --- /dev/null +++ b/vendor/github.com/rs/zerolog/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2017 Olivier Poitrey + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/rs/zerolog/README.md b/vendor/github.com/rs/zerolog/README.md new file mode 100644 index 00000000..e1403fe3 --- /dev/null +++ b/vendor/github.com/rs/zerolog/README.md @@ -0,0 +1,618 @@ +# Zero Allocation JSON Logger + +[![godoc](http://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/rs/zerolog) [![license](http://img.shields.io/badge/license-MIT-red.svg?style=flat)](https://raw.githubusercontent.com/rs/zerolog/master/LICENSE) [![Build Status](https://travis-ci.org/rs/zerolog.svg?branch=master)](https://travis-ci.org/rs/zerolog) [![Coverage](http://gocover.io/_badge/github.com/rs/zerolog)](http://gocover.io/github.com/rs/zerolog) + +The zerolog package provides a fast and simple logger dedicated to JSON output. + +Zerolog's API is designed to provide both a great developer experience and stunning [performance](#benchmarks). Its unique chaining API allows zerolog to write JSON (or CBOR) log events by avoiding allocations and reflection. + +Uber's [zap](https://godoc.org/go.uber.org/zap) library pioneered this approach. Zerolog is taking this concept to the next level with a simpler to use API and even better performance. + +To keep the code base and the API simple, zerolog focuses on efficient structured logging only. Pretty logging on the console is made possible using the provided (but inefficient) [`zerolog.ConsoleWriter`](#pretty-logging). + +![Pretty Logging Image](pretty.png) + +## Who uses zerolog + +Find out [who uses zerolog](https://github.com/rs/zerolog/wiki/Who-uses-zerolog) and add your company / project to the list. 
+ +## Features + +* Blazing fast +* Low to zero allocation +* Level logging +* Sampling +* Hooks +* Contextual fields +* `context.Context` integration +* `net/http` helpers +* JSON and CBOR encoding formats +* Pretty logging for development + +## Installation + +```bash +go get -u github.com/rs/zerolog/log +``` + +## Getting Started + +### Simple Logging Example + +For simple logging, import the global logger package **github.com/rs/zerolog/log** + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + // UNIX Time is faster and smaller than most timestamps + // If you set zerolog.TimeFieldFormat to an empty string, + // logs will write with UNIX time + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Print("hello world") +} + +// Output: {"time":1516134303,"level":"debug","message":"hello world"} +``` +> Note: By default log writes to `os.Stderr` +> Note: The default log level for `log.Print` is *debug* + +### Contextual Logging + +**zerolog** allows data to be added to log messages in the form of key:value pairs. The data added to the message adds "context" about the log event that can be critical for debugging as well as myriad other purposes. An example of this is below: + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Debug(). + Str("Scale", "833 cents"). + Float64("Interval", 833.09). + Msg("Fibonacci is everywhere") + + log.Debug(). + Str("Name", "Tom"). + Send() +} + +// Output: {"level":"debug","Scale":"833 cents","Interval":833.09,"time":1562212768,"message":"Fibonacci is everywhere"} +// Output: {"level":"debug","Name":"Tom","time":1562212768} +``` + +> You'll note in the above example that when adding contextual fields, the fields are strongly typed. 
You can find the full list of supported fields [here](#standard-types) + +### Leveled Logging + +#### Simple Leveled Logging Example + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Info().Msg("hello world") +} + +// Output: {"time":1516134303,"level":"info","message":"hello world"} +``` + +> It is very important to note that when using the **zerolog** chaining API, as shown above (`log.Info().Msg("hello world"`), the chain must have either the `Msg` or `Msgf` method call. If you forget to add either of these, the log will not occur and there is no compile time error to alert you of this. + +**zerolog** allows for logging at the following levels (from highest to lowest): + +* panic (`zerolog.PanicLevel`, 5) +* fatal (`zerolog.FatalLevel`, 4) +* error (`zerolog.ErrorLevel`, 3) +* warn (`zerolog.WarnLevel`, 2) +* info (`zerolog.InfoLevel`, 1) +* debug (`zerolog.DebugLevel`, 0) +* trace (`zerolog.TraceLevel`, -1) + +You can set the Global logging level to any of these options using the `SetGlobalLevel` function in the zerolog package, passing in one of the given constants above, e.g. `zerolog.InfoLevel` would be the "info" level. Whichever level is chosen, all logs with a level greater than or equal to that level will be written. To turn off logging entirely, pass the `zerolog.Disabled` constant. + +#### Setting Global Log Level + +This example uses command-line flags to demonstrate various outputs depending on the chosen log level. 
+ +```go +package main + +import ( + "flag" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + debug := flag.Bool("debug", false, "sets log level to debug") + + flag.Parse() + + // Default level for this example is info, unless debug flag is present + zerolog.SetGlobalLevel(zerolog.InfoLevel) + if *debug { + zerolog.SetGlobalLevel(zerolog.DebugLevel) + } + + log.Debug().Msg("This message appears only when log level set to Debug") + log.Info().Msg("This message appears when log level set to Debug or Info") + + if e := log.Debug(); e.Enabled() { + // Compute log output only if enabled. + value := "bar" + e.Str("foo", value).Msg("some debug message") + } +} +``` + +Info Output (no flag) + +```bash +$ ./logLevelExample +{"time":1516387492,"level":"info","message":"This message appears when log level set to Debug or Info"} +``` + +Debug Output (debug flag set) + +```bash +$ ./logLevelExample -debug +{"time":1516387573,"level":"debug","message":"This message appears only when log level set to Debug"} +{"time":1516387573,"level":"info","message":"This message appears when log level set to Debug or Info"} +{"time":1516387573,"level":"debug","foo":"bar","message":"some debug message"} +``` + +#### Logging without Level or Message + +You may choose to log without a specific level by using the `Log` method. You may also write without a message by setting an empty string in the `msg string` parameter of the `Msg` method. Both are demonstrated in the example below. + +```go +package main + +import ( + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Log(). + Str("foo", "bar"). 
+ Msg("") +} + +// Output: {"time":1494567715,"foo":"bar"} +``` + +#### Logging Fatal Messages + +```go +package main + +import ( + "errors" + + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +func main() { + err := errors.New("A repo man spends his life getting into tense situations") + service := "myservice" + + zerolog.TimeFieldFormat = zerolog.TimeFormatUnix + + log.Fatal(). + Err(err). + Str("service", service). + Msgf("Cannot start %s", service) +} + +// Output: {"time":1516133263,"level":"fatal","error":"A repo man spends his life getting into tense situations","service":"myservice","message":"Cannot start myservice"} +// exit status 1 +``` + +> NOTE: Using `Msgf` generates one allocation even when the logger is disabled. + +### Create logger instance to manage different outputs + +```go +logger := zerolog.New(os.Stderr).With().Timestamp().Logger() + +logger.Info().Str("foo", "bar").Msg("hello world") + +// Output: {"level":"info","time":1494567715,"message":"hello world","foo":"bar"} +``` + +### Sub-loggers let you chain loggers with additional context + +```go +sublogger := log.With(). + Str("component", "foo"). 
+ Logger() +sublogger.Info().Msg("hello world") + +// Output: {"level":"info","time":1494567715,"message":"hello world","component":"foo"} +``` + +### Pretty logging + +To log a human-friendly, colorized output, use `zerolog.ConsoleWriter`: + +```go +log.Logger = log.Output(zerolog.ConsoleWriter{Out: os.Stderr}) + +log.Info().Str("foo", "bar").Msg("Hello world") + +// Output: 3:04PM INF Hello World foo=bar +``` + +To customize the configuration and formatting: + +```go +output := zerolog.ConsoleWriter{Out: os.Stdout, TimeFormat: time.RFC3339} +output.FormatLevel = func(i interface{}) string { + return strings.ToUpper(fmt.Sprintf("| %-6s|", i)) +} +output.FormatMessage = func(i interface{}) string { + return fmt.Sprintf("***%s****", i) +} +output.FormatFieldName = func(i interface{}) string { + return fmt.Sprintf("%s:", i) +} +output.FormatFieldValue = func(i interface{}) string { + return strings.ToUpper(fmt.Sprintf("%s", i)) +} + +log := zerolog.New(output).With().Timestamp().Logger() + +log.Info().Str("foo", "bar").Msg("Hello World") + +// Output: 2006-01-02T15:04:05Z07:00 | INFO | ***Hello World**** foo:BAR +``` + +### Sub dictionary + +```go +log.Info(). + Str("foo", "bar"). + Dict("dict", zerolog.Dict(). + Str("bar", "baz"). 
+ Int("n", 1), + ).Msg("hello world") + +// Output: {"level":"info","time":1494567715,"foo":"bar","dict":{"bar":"baz","n":1},"message":"hello world"} +``` + +### Customize automatic field names + +```go +zerolog.TimestampFieldName = "t" +zerolog.LevelFieldName = "l" +zerolog.MessageFieldName = "m" + +log.Info().Msg("hello world") + +// Output: {"l":"info","t":1494567715,"m":"hello world"} +``` + +### Add contextual fields to the global logger + +```go +log.Logger = log.With().Str("foo", "bar").Logger() +``` + +### Add file and line number to log + +```go +log.Logger = log.With().Caller().Logger() +log.Info().Msg("hello world") + +// Output: {"level": "info", "message": "hello world", "caller": "/go/src/your_project/some_file:21"} +``` + + +### Thread-safe, lock-free, non-blocking writer + +If your writer might be slow or not thread-safe and you need your log producers to never get slowed down by a slow writer, you can use a `diode.Writer` as follow: + +```go +wr := diode.NewWriter(os.Stdout, 1000, 10*time.Millisecond, func(missed int) { + fmt.Printf("Logger Dropped %d messages", missed) + }) +log := zerolog.New(wr) +log.Print("test") +``` + +You will need to install `code.cloudfoundry.org/go-diodes` to use this feature. + +### Log Sampling + +```go +sampled := log.Sample(&zerolog.BasicSampler{N: 10}) +sampled.Info().Msg("will be logged every 10 messages") + +// Output: {"time":1494567715,"level":"info","message":"will be logged every 10 messages"} +``` + +More advanced sampling: + +```go +// Will let 5 debug messages per period of 1 second. +// Over 5 debug message, 1 every 100 debug messages are logged. +// Other levels are not sampled. 
+sampled := log.Sample(zerolog.LevelSampler{ + DebugSampler: &zerolog.BurstSampler{ + Burst: 5, + Period: 1*time.Second, + NextSampler: &zerolog.BasicSampler{N: 100}, + }, +}) +sampled.Debug().Msg("hello world") + +// Output: {"time":1494567715,"level":"debug","message":"hello world"} +``` + +### Hooks + +```go +type SeverityHook struct{} + +func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) { + if level != zerolog.NoLevel { + e.Str("severity", level.String()) + } +} + +hooked := log.Hook(SeverityHook{}) +hooked.Warn().Msg("") + +// Output: {"level":"warn","severity":"warn"} +``` + +### Pass a sub-logger by context + +```go +ctx := log.With().Str("component", "module").Logger().WithContext(ctx) + +log.Ctx(ctx).Info().Msg("hello world") + +// Output: {"component":"module","level":"info","message":"hello world"} +``` + +### Set as standard logger output + +```go +log := zerolog.New(os.Stdout).With(). + Str("foo", "bar"). + Logger() + +stdlog.SetFlags(0) +stdlog.SetOutput(log) + +stdlog.Print("hello world") + +// Output: {"foo":"bar","message":"hello world"} +``` + +### Integration with `net/http` + +The `github.com/rs/zerolog/hlog` package provides some helpers to integrate zerolog with `http.Handler`. + +In this example we use [alice](https://github.com/justinas/alice) to install logger for better readability. + +```go +log := zerolog.New(os.Stdout).With(). + Timestamp(). + Str("role", "my-service"). + Str("host", host). + Logger() + +c := alice.New() + +// Install the logger handler with default output on the console +c = c.Append(hlog.NewHandler(log)) + +// Install some provided extra handler to set some request's context fields. +// Thanks to that handler, all our logs will come with some prepopulated fields. +c = c.Append(hlog.AccessHandler(func(r *http.Request, status, size int, duration time.Duration) { + hlog.FromRequest(r).Info(). + Str("method", r.Method). + Stringer("url", r.URL). + Int("status", status). + Int("size", size). 
+ Dur("duration", duration). + Msg("") +})) +c = c.Append(hlog.RemoteAddrHandler("ip")) +c = c.Append(hlog.UserAgentHandler("user_agent")) +c = c.Append(hlog.RefererHandler("referer")) +c = c.Append(hlog.RequestIDHandler("req_id", "Request-Id")) + +// Here is your final handler +h := c.Then(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Get the logger from the request's context. You can safely assume it + // will be always there: if the handler is removed, hlog.FromRequest + // will return a no-op logger. + hlog.FromRequest(r).Info(). + Str("user", "current user"). + Str("status", "ok"). + Msg("Something happened") + + // Output: {"level":"info","time":"2001-02-03T04:05:06Z","role":"my-service","host":"local-hostname","req_id":"b4g0l5t6tfid6dtrapu0","user":"current user","status":"ok","message":"Something happened"} +})) +http.Handle("/", h) + +if err := http.ListenAndServe(":8080", nil); err != nil { + log.Fatal().Err(err).Msg("Startup failed") +} +``` + +## Multiple Log Output +`zerolog.MultiLevelWriter` may be used to send the log message to multiple outputs. +In this example, we send the log message to both `os.Stdout` and the in-built ConsoleWriter. +```go +func main() { + consoleWriter := zerolog.ConsoleWriter{Out: os.Stdout} + + multi := zerolog.MultiLevelWriter(consoleWriter, os.Stdout) + + logger := zerolog.New(multi).With().Timestamp().Logger() + + logger.Info().Msg("Hello World!") +} + +// Output (Line 1: Console; Line 2: Stdout) +// 12:36PM INF Hello World! +// {"level":"info","time":"2019-11-07T12:36:38+03:00","message":"Hello World!"} +``` + +## Global Settings + +Some settings can be changed and will by applied to all loggers: + +* `log.Logger`: You can set this value to customize the global logger (the one used by package level methods). +* `zerolog.SetGlobalLevel`: Can raise the minimum level of all loggers. Call this with `zerolog.Disabled` to disable logging altogether (quiet mode). 
+* `zerolog.DisableSampling`: If argument is `true`, all sampled loggers will stop sampling and issue 100% of their log events. +* `zerolog.TimestampFieldName`: Can be set to customize `Timestamp` field name. +* `zerolog.LevelFieldName`: Can be set to customize level field name. +* `zerolog.MessageFieldName`: Can be set to customize message field name. +* `zerolog.ErrorFieldName`: Can be set to customize `Err` field name. +* `zerolog.TimeFieldFormat`: Can be set to customize `Time` field value formatting. If set with `zerolog.TimeFormatUnix`, `zerolog.TimeFormatUnixMs` or `zerolog.TimeFormatUnixMicro`, times are formated as UNIX timestamp. +* `zerolog.DurationFieldUnit`: Can be set to customize the unit for time.Duration type fields added by `Dur` (default: `time.Millisecond`). +* `zerolog.DurationFieldInteger`: If set to `true`, `Dur` fields are formatted as integers instead of floats (default: `false`). +* `zerolog.ErrorHandler`: Called whenever zerolog fails to write an event on its output. If not set, an error is printed on the stderr. This handler must be thread safe and non-blocking. + +## Field Types + +### Standard Types + +* `Str` +* `Bool` +* `Int`, `Int8`, `Int16`, `Int32`, `Int64` +* `Uint`, `Uint8`, `Uint16`, `Uint32`, `Uint64` +* `Float32`, `Float64` + +### Advanced Fields + +* `Err`: Takes an `error` and renders it as a string using the `zerolog.ErrorFieldName` field name. +* `Timestamp`: Inserts a timestamp field with `zerolog.TimestampFieldName` field name, formatted using `zerolog.TimeFieldFormat`. +* `Time`: Adds a field with time formatted with `zerolog.TimeFieldFormat`. +* `Dur`: Adds a field with `time.Duration`. +* `Dict`: Adds a sub-key/value as a field of the event. +* `RawJSON`: Adds a field with an already encoded JSON (`[]byte`) +* `Hex`: Adds a field with value formatted as a hexadecimal string (`[]byte`) +* `Interface`: Uses reflection to marshal the type. 
+ +Most fields are also available in the slice format (`Strs` for `[]string`, `Errs` for `[]error` etc.) + +## Binary Encoding + +In addition to the default JSON encoding, `zerolog` can produce binary logs using [CBOR](http://cbor.io) encoding. The choice of encoding can be decided at compile time using the build tag `binary_log` as follows: + +```bash +go build -tags binary_log . +``` + +To Decode binary encoded log files you can use any CBOR decoder. One has been tested to work +with zerolog library is [CSD](https://github.com/toravir/csd/). + +## Related Projects + +* [grpc-zerolog](https://github.com/cheapRoc/grpc-zerolog): Implementation of `grpclog.LoggerV2` interface using `zerolog` + +## Benchmarks + +See [logbench](http://hackemist.com/logbench/) for more comprehensive and up-to-date benchmarks. + +All operations are allocation free (those numbers *include* JSON encoding): + +```text +BenchmarkLogEmpty-8 100000000 19.1 ns/op 0 B/op 0 allocs/op +BenchmarkDisabled-8 500000000 4.07 ns/op 0 B/op 0 allocs/op +BenchmarkInfo-8 30000000 42.5 ns/op 0 B/op 0 allocs/op +BenchmarkContextFields-8 30000000 44.9 ns/op 0 B/op 0 allocs/op +BenchmarkLogFields-8 10000000 184 ns/op 0 B/op 0 allocs/op +``` + +There are a few Go logging benchmarks and comparisons that include zerolog. 
+ +* [imkira/go-loggers-bench](https://github.com/imkira/go-loggers-bench) +* [uber-common/zap](https://github.com/uber-go/zap#performance) + +Using Uber's zap comparison benchmark: + +Log a message and 10 fields: + +| Library | Time | Bytes Allocated | Objects Allocated | +| :--- | :---: | :---: | :---: | +| zerolog | 767 ns/op | 552 B/op | 6 allocs/op | +| :zap: zap | 848 ns/op | 704 B/op | 2 allocs/op | +| :zap: zap (sugared) | 1363 ns/op | 1610 B/op | 20 allocs/op | +| go-kit | 3614 ns/op | 2895 B/op | 66 allocs/op | +| lion | 5392 ns/op | 5807 B/op | 63 allocs/op | +| logrus | 5661 ns/op | 6092 B/op | 78 allocs/op | +| apex/log | 15332 ns/op | 3832 B/op | 65 allocs/op | +| log15 | 20657 ns/op | 5632 B/op | 93 allocs/op | + +Log a message with a logger that already has 10 fields of context: + +| Library | Time | Bytes Allocated | Objects Allocated | +| :--- | :---: | :---: | :---: | +| zerolog | 52 ns/op | 0 B/op | 0 allocs/op | +| :zap: zap | 283 ns/op | 0 B/op | 0 allocs/op | +| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op | +| lion | 2702 ns/op | 4074 B/op | 38 allocs/op | +| go-kit | 3378 ns/op | 3046 B/op | 52 allocs/op | +| logrus | 4309 ns/op | 4564 B/op | 63 allocs/op | +| apex/log | 13456 ns/op | 2898 B/op | 51 allocs/op | +| log15 | 14179 ns/op | 2642 B/op | 44 allocs/op | + +Log a static string, without any context or `printf`-style templating: + +| Library | Time | Bytes Allocated | Objects Allocated | +| :--- | :---: | :---: | :---: | +| zerolog | 50 ns/op | 0 B/op | 0 allocs/op | +| :zap: zap | 236 ns/op | 0 B/op | 0 allocs/op | +| standard library | 453 ns/op | 80 B/op | 2 allocs/op | +| :zap: zap (sugared) | 337 ns/op | 80 B/op | 2 allocs/op | +| go-kit | 508 ns/op | 656 B/op | 13 allocs/op | +| lion | 771 ns/op | 1224 B/op | 10 allocs/op | +| logrus | 1244 ns/op | 1505 B/op | 27 allocs/op | +| apex/log | 2751 ns/op | 584 B/op | 11 allocs/op | +| log15 | 5181 ns/op | 1592 B/op | 26 allocs/op | + +## Caveats + +Note that zerolog does 
no de-duplication of fields. Using the same key multiple times creates multiple keys in final JSON: + +```go +logger := zerolog.New(os.Stderr).With().Timestamp().Logger() +logger.Info(). + Timestamp(). + Msg("dup") +// Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"} +``` + +In this case, many consumers will take the last value, but this is not guaranteed; check yours if in doubt. diff --git a/vendor/github.com/rs/zerolog/_config.yml b/vendor/github.com/rs/zerolog/_config.yml new file mode 100644 index 00000000..a1e896d7 --- /dev/null +++ b/vendor/github.com/rs/zerolog/_config.yml @@ -0,0 +1 @@ +remote_theme: rs/gh-readme diff --git a/vendor/github.com/rs/zerolog/array.go b/vendor/github.com/rs/zerolog/array.go new file mode 100644 index 00000000..0f7f53ee --- /dev/null +++ b/vendor/github.com/rs/zerolog/array.go @@ -0,0 +1,233 @@ +package zerolog + +import ( + "net" + "sync" + "time" +) + +var arrayPool = &sync.Pool{ + New: func() interface{} { + return &Array{ + buf: make([]byte, 0, 500), + } + }, +} + +// Array is used to prepopulate an array of items +// which can be re-used to add to log messages. +type Array struct { + buf []byte +} + +func putArray(a *Array) { + // Proper usage of a sync.Pool requires each entry to have approximately + // the same memory cost. To obtain this property when the stored type + // contains a variably-sized buffer, we add a hard limit on the maximum buffer + // to place back in the pool. + // + // See https://golang.org/issue/23199 + const maxSize = 1 << 16 // 64KiB + if cap(a.buf) > maxSize { + return + } + arrayPool.Put(a) +} + +// Arr creates an array to be added to an Event or Context. +func Arr() *Array { + a := arrayPool.Get().(*Array) + a.buf = a.buf[:0] + return a +} + +// MarshalZerologArray method here is no-op - since data is +// already in the needed format. 
+func (*Array) MarshalZerologArray(*Array) { +} + +func (a *Array) write(dst []byte) []byte { + dst = enc.AppendArrayStart(dst) + if len(a.buf) > 0 { + dst = append(append(dst, a.buf...)) + } + dst = enc.AppendArrayEnd(dst) + putArray(a) + return dst +} + +// Object marshals an object that implement the LogObjectMarshaler +// interface and append append it to the array. +func (a *Array) Object(obj LogObjectMarshaler) *Array { + e := Dict() + obj.MarshalZerologObject(e) + e.buf = enc.AppendEndMarker(e.buf) + a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...) + putEvent(e) + return a +} + +// Str append append the val as a string to the array. +func (a *Array) Str(val string) *Array { + a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), val) + return a +} + +// Bytes append append the val as a string to the array. +func (a *Array) Bytes(val []byte) *Array { + a.buf = enc.AppendBytes(enc.AppendArrayDelim(a.buf), val) + return a +} + +// Hex append append the val as a hex string to the array. +func (a *Array) Hex(val []byte) *Array { + a.buf = enc.AppendHex(enc.AppendArrayDelim(a.buf), val) + return a +} + +// RawJSON adds already encoded JSON to the array. +func (a *Array) RawJSON(val []byte) *Array { + a.buf = appendJSON(enc.AppendArrayDelim(a.buf), val) + return a +} + +// Err serializes and appends the err to the array. +func (a *Array) Err(err error) *Array { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(m) + a.buf = append(enc.AppendArrayDelim(a.buf), e.buf...) 
+ putEvent(e) + case error: + if m == nil || isNilValue(m) { + a.buf = enc.AppendNil(enc.AppendArrayDelim(a.buf)) + } else { + a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m.Error()) + } + case string: + a.buf = enc.AppendString(enc.AppendArrayDelim(a.buf), m) + default: + a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), m) + } + + return a +} + +// Bool append append the val as a bool to the array. +func (a *Array) Bool(b bool) *Array { + a.buf = enc.AppendBool(enc.AppendArrayDelim(a.buf), b) + return a +} + +// Int append append i as a int to the array. +func (a *Array) Int(i int) *Array { + a.buf = enc.AppendInt(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Int8 append append i as a int8 to the array. +func (a *Array) Int8(i int8) *Array { + a.buf = enc.AppendInt8(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Int16 append append i as a int16 to the array. +func (a *Array) Int16(i int16) *Array { + a.buf = enc.AppendInt16(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Int32 append append i as a int32 to the array. +func (a *Array) Int32(i int32) *Array { + a.buf = enc.AppendInt32(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Int64 append append i as a int64 to the array. +func (a *Array) Int64(i int64) *Array { + a.buf = enc.AppendInt64(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint append append i as a uint to the array. +func (a *Array) Uint(i uint) *Array { + a.buf = enc.AppendUint(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint8 append append i as a uint8 to the array. +func (a *Array) Uint8(i uint8) *Array { + a.buf = enc.AppendUint8(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint16 append append i as a uint16 to the array. +func (a *Array) Uint16(i uint16) *Array { + a.buf = enc.AppendUint16(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint32 append append i as a uint32 to the array. 
+func (a *Array) Uint32(i uint32) *Array { + a.buf = enc.AppendUint32(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Uint64 append append i as a uint64 to the array. +func (a *Array) Uint64(i uint64) *Array { + a.buf = enc.AppendUint64(enc.AppendArrayDelim(a.buf), i) + return a +} + +// Float32 append append f as a float32 to the array. +func (a *Array) Float32(f float32) *Array { + a.buf = enc.AppendFloat32(enc.AppendArrayDelim(a.buf), f) + return a +} + +// Float64 append append f as a float64 to the array. +func (a *Array) Float64(f float64) *Array { + a.buf = enc.AppendFloat64(enc.AppendArrayDelim(a.buf), f) + return a +} + +// Time append append t formated as string using zerolog.TimeFieldFormat. +func (a *Array) Time(t time.Time) *Array { + a.buf = enc.AppendTime(enc.AppendArrayDelim(a.buf), t, TimeFieldFormat) + return a +} + +// Dur append append d to the array. +func (a *Array) Dur(d time.Duration) *Array { + a.buf = enc.AppendDuration(enc.AppendArrayDelim(a.buf), d, DurationFieldUnit, DurationFieldInteger) + return a +} + +// Interface append append i marshaled using reflection. 
+func (a *Array) Interface(i interface{}) *Array { + if obj, ok := i.(LogObjectMarshaler); ok { + return a.Object(obj) + } + a.buf = enc.AppendInterface(enc.AppendArrayDelim(a.buf), i) + return a +} + +// IPAddr adds IPv4 or IPv6 address to the array +func (a *Array) IPAddr(ip net.IP) *Array { + a.buf = enc.AppendIPAddr(enc.AppendArrayDelim(a.buf), ip) + return a +} + +// IPPrefix adds IPv4 or IPv6 Prefix (IP + mask) to the array +func (a *Array) IPPrefix(pfx net.IPNet) *Array { + a.buf = enc.AppendIPPrefix(enc.AppendArrayDelim(a.buf), pfx) + return a +} + +// MACAddr adds a MAC (Ethernet) address to the array +func (a *Array) MACAddr(ha net.HardwareAddr) *Array { + a.buf = enc.AppendMACAddr(enc.AppendArrayDelim(a.buf), ha) + return a +} diff --git a/vendor/github.com/rs/zerolog/console.go b/vendor/github.com/rs/zerolog/console.go new file mode 100644 index 00000000..54f79945 --- /dev/null +++ b/vendor/github.com/rs/zerolog/console.go @@ -0,0 +1,397 @@ +package zerolog + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "os" + "sort" + "strconv" + "strings" + "sync" + "time" +) + +const ( + colorBlack = iota + 30 + colorRed + colorGreen + colorYellow + colorBlue + colorMagenta + colorCyan + colorWhite + + colorBold = 1 + colorDarkGray = 90 +) + +var ( + consoleBufPool = sync.Pool{ + New: func() interface{} { + return bytes.NewBuffer(make([]byte, 0, 100)) + }, + } +) + +const ( + consoleDefaultTimeFormat = time.Kitchen +) + +// Formatter transforms the input into a formatted string. +type Formatter func(interface{}) string + +// ConsoleWriter parses the JSON input and writes it in an +// (optionally) colorized, human-friendly format to Out. +type ConsoleWriter struct { + // Out is the output destination. + Out io.Writer + + // NoColor disables the colorized output. + NoColor bool + + // TimeFormat specifies the format for timestamp in output. + TimeFormat string + + // PartsOrder defines the order of parts in output. 
+ PartsOrder []string + + FormatTimestamp Formatter + FormatLevel Formatter + FormatCaller Formatter + FormatMessage Formatter + FormatFieldName Formatter + FormatFieldValue Formatter + FormatErrFieldName Formatter + FormatErrFieldValue Formatter +} + +// NewConsoleWriter creates and initializes a new ConsoleWriter. +func NewConsoleWriter(options ...func(w *ConsoleWriter)) ConsoleWriter { + w := ConsoleWriter{ + Out: os.Stdout, + TimeFormat: consoleDefaultTimeFormat, + PartsOrder: consoleDefaultPartsOrder(), + } + + for _, opt := range options { + opt(&w) + } + + return w +} + +// Write transforms the JSON input with formatters and appends to w.Out. +func (w ConsoleWriter) Write(p []byte) (n int, err error) { + if w.PartsOrder == nil { + w.PartsOrder = consoleDefaultPartsOrder() + } + + var buf = consoleBufPool.Get().(*bytes.Buffer) + defer func() { + buf.Reset() + consoleBufPool.Put(buf) + }() + + var evt map[string]interface{} + p = decodeIfBinaryToBytes(p) + d := json.NewDecoder(bytes.NewReader(p)) + d.UseNumber() + err = d.Decode(&evt) + if err != nil { + return n, fmt.Errorf("cannot decode event: %s", err) + } + + for _, p := range w.PartsOrder { + w.writePart(buf, evt, p) + } + + w.writeFields(evt, buf) + + err = buf.WriteByte('\n') + if err != nil { + return n, err + } + _, err = buf.WriteTo(w.Out) + return len(p), err +} + +// writeFields appends formatted key-value pairs to buf. 
+func (w ConsoleWriter) writeFields(evt map[string]interface{}, buf *bytes.Buffer) { + var fields = make([]string, 0, len(evt)) + for field := range evt { + switch field { + case LevelFieldName, TimestampFieldName, MessageFieldName, CallerFieldName: + continue + } + fields = append(fields, field) + } + sort.Strings(fields) + + if len(fields) > 0 { + buf.WriteByte(' ') + } + + // Move the "error" field to the front + ei := sort.Search(len(fields), func(i int) bool { return fields[i] >= ErrorFieldName }) + if ei < len(fields) && fields[ei] == ErrorFieldName { + fields[ei] = "" + fields = append([]string{ErrorFieldName}, fields...) + var xfields = make([]string, 0, len(fields)) + for _, field := range fields { + if field == "" { // Skip empty fields + continue + } + xfields = append(xfields, field) + } + fields = xfields + } + + for i, field := range fields { + var fn Formatter + var fv Formatter + + if field == ErrorFieldName { + if w.FormatErrFieldName == nil { + fn = consoleDefaultFormatErrFieldName(w.NoColor) + } else { + fn = w.FormatErrFieldName + } + + if w.FormatErrFieldValue == nil { + fv = consoleDefaultFormatErrFieldValue(w.NoColor) + } else { + fv = w.FormatErrFieldValue + } + } else { + if w.FormatFieldName == nil { + fn = consoleDefaultFormatFieldName(w.NoColor) + } else { + fn = w.FormatFieldName + } + + if w.FormatFieldValue == nil { + fv = consoleDefaultFormatFieldValue + } else { + fv = w.FormatFieldValue + } + } + + buf.WriteString(fn(field)) + + switch fValue := evt[field].(type) { + case string: + if needsQuote(fValue) { + buf.WriteString(fv(strconv.Quote(fValue))) + } else { + buf.WriteString(fv(fValue)) + } + case json.Number: + buf.WriteString(fv(fValue)) + default: + b, err := json.Marshal(fValue) + if err != nil { + fmt.Fprintf(buf, colorize("[error: %v]", colorRed, w.NoColor), err) + } else { + fmt.Fprint(buf, fv(b)) + } + } + + if i < len(fields)-1 { // Skip space for last field + buf.WriteByte(' ') + } + } +} + +// writePart appends a 
formatted part to buf. +func (w ConsoleWriter) writePart(buf *bytes.Buffer, evt map[string]interface{}, p string) { + var f Formatter + + switch p { + case LevelFieldName: + if w.FormatLevel == nil { + f = consoleDefaultFormatLevel(w.NoColor) + } else { + f = w.FormatLevel + } + case TimestampFieldName: + if w.FormatTimestamp == nil { + f = consoleDefaultFormatTimestamp(w.TimeFormat, w.NoColor) + } else { + f = w.FormatTimestamp + } + case MessageFieldName: + if w.FormatMessage == nil { + f = consoleDefaultFormatMessage + } else { + f = w.FormatMessage + } + case CallerFieldName: + if w.FormatCaller == nil { + f = consoleDefaultFormatCaller(w.NoColor) + } else { + f = w.FormatCaller + } + default: + if w.FormatFieldValue == nil { + f = consoleDefaultFormatFieldValue + } else { + f = w.FormatFieldValue + } + } + + var s = f(evt[p]) + + if len(s) > 0 { + buf.WriteString(s) + if p != w.PartsOrder[len(w.PartsOrder)-1] { // Skip space for last part + buf.WriteByte(' ') + } + } +} + +// needsQuote returns true when the string s should be quoted in output. +func needsQuote(s string) bool { + for i := range s { + if s[i] < 0x20 || s[i] > 0x7e || s[i] == ' ' || s[i] == '\\' || s[i] == '"' { + return true + } + } + return false +} + +// colorize returns the string s wrapped in ANSI code c, unless disabled is true. 
+func colorize(s interface{}, c int, disabled bool) string { + if disabled { + return fmt.Sprintf("%s", s) + } + return fmt.Sprintf("\x1b[%dm%v\x1b[0m", c, s) +} + +// ----- DEFAULT FORMATTERS --------------------------------------------------- + +func consoleDefaultPartsOrder() []string { + return []string{ + TimestampFieldName, + LevelFieldName, + CallerFieldName, + MessageFieldName, + } +} + +func consoleDefaultFormatTimestamp(timeFormat string, noColor bool) Formatter { + if timeFormat == "" { + timeFormat = consoleDefaultTimeFormat + } + return func(i interface{}) string { + t := "" + switch tt := i.(type) { + case string: + ts, err := time.Parse(TimeFieldFormat, tt) + if err != nil { + t = tt + } else { + t = ts.Format(timeFormat) + } + case json.Number: + i, err := tt.Int64() + if err != nil { + t = tt.String() + } else { + var sec, nsec int64 = i, 0 + switch TimeFieldFormat { + case TimeFormatUnixMs: + nsec = int64(time.Duration(i) * time.Millisecond) + sec = 0 + case TimeFormatUnixMicro: + nsec = int64(time.Duration(i) * time.Microsecond) + sec = 0 + } + ts := time.Unix(sec, nsec).UTC() + t = ts.Format(timeFormat) + } + } + return colorize(t, colorDarkGray, noColor) + } +} + +func consoleDefaultFormatLevel(noColor bool) Formatter { + return func(i interface{}) string { + var l string + if ll, ok := i.(string); ok { + switch ll { + case "trace": + l = colorize("TRC", colorMagenta, noColor) + case "debug": + l = colorize("DBG", colorYellow, noColor) + case "info": + l = colorize("INF", colorGreen, noColor) + case "warn": + l = colorize("WRN", colorRed, noColor) + case "error": + l = colorize(colorize("ERR", colorRed, noColor), colorBold, noColor) + case "fatal": + l = colorize(colorize("FTL", colorRed, noColor), colorBold, noColor) + case "panic": + l = colorize(colorize("PNC", colorRed, noColor), colorBold, noColor) + default: + l = colorize("???", colorBold, noColor) + } + } else { + if i == nil { + l = colorize("???", colorBold, noColor) + } else { + l = 
strings.ToUpper(fmt.Sprintf("%s", i))[0:3] + } + } + return l + } +} + +func consoleDefaultFormatCaller(noColor bool) Formatter { + return func(i interface{}) string { + var c string + if cc, ok := i.(string); ok { + c = cc + } + if len(c) > 0 { + cwd, err := os.Getwd() + if err == nil { + c = strings.TrimPrefix(c, cwd) + c = strings.TrimPrefix(c, "/") + } + c = colorize(c, colorBold, noColor) + colorize(" >", colorCyan, noColor) + } + return c + } +} + +func consoleDefaultFormatMessage(i interface{}) string { + if i == nil { + return "" + } + return fmt.Sprintf("%s", i) +} + +func consoleDefaultFormatFieldName(noColor bool) Formatter { + return func(i interface{}) string { + return colorize(fmt.Sprintf("%s=", i), colorCyan, noColor) + } +} + +func consoleDefaultFormatFieldValue(i interface{}) string { + return fmt.Sprintf("%s", i) +} + +func consoleDefaultFormatErrFieldName(noColor bool) Formatter { + return func(i interface{}) string { + return colorize(fmt.Sprintf("%s=", i), colorRed, noColor) + } +} + +func consoleDefaultFormatErrFieldValue(noColor bool) Formatter { + return func(i interface{}) string { + return colorize(fmt.Sprintf("%s", i), colorRed, noColor) + } +} diff --git a/vendor/github.com/rs/zerolog/context.go b/vendor/github.com/rs/zerolog/context.go new file mode 100644 index 00000000..08c62fdc --- /dev/null +++ b/vendor/github.com/rs/zerolog/context.go @@ -0,0 +1,439 @@ +package zerolog + +import ( + "fmt" + "io/ioutil" + "math" + "net" + "time" +) + +// Context configures a new sub-logger with contextual fields. +type Context struct { + l Logger +} + +// Logger returns the logger with the context previously set. +func (c Context) Logger() Logger { + return c.l +} + +// Fields is a helper function to use a map to set fields using type assertion. +func (c Context) Fields(fields map[string]interface{}) Context { + c.l.context = appendFields(c.l.context, fields) + return c +} + +// Dict adds the field key with the dict to the logger context. 
+func (c Context) Dict(key string, dict *Event) Context { + dict.buf = enc.AppendEndMarker(dict.buf) + c.l.context = append(enc.AppendKey(c.l.context, key), dict.buf...) + putEvent(dict) + return c +} + +// Array adds the field key with an array to the event context. +// Use zerolog.Arr() to create the array or pass a type that +// implement the LogArrayMarshaler interface. +func (c Context) Array(key string, arr LogArrayMarshaler) Context { + c.l.context = enc.AppendKey(c.l.context, key) + if arr, ok := arr.(*Array); ok { + c.l.context = arr.write(c.l.context) + return c + } + var a *Array + if aa, ok := arr.(*Array); ok { + a = aa + } else { + a = Arr() + arr.MarshalZerologArray(a) + } + c.l.context = a.write(c.l.context) + return c +} + +// Object marshals an object that implement the LogObjectMarshaler interface. +func (c Context) Object(key string, obj LogObjectMarshaler) Context { + e := newEvent(levelWriterAdapter{ioutil.Discard}, 0) + e.Object(key, obj) + c.l.context = enc.AppendObjectData(c.l.context, e.buf) + putEvent(e) + return c +} + +// EmbedObject marshals and Embeds an object that implement the LogObjectMarshaler interface. +func (c Context) EmbedObject(obj LogObjectMarshaler) Context { + e := newEvent(levelWriterAdapter{ioutil.Discard}, 0) + e.EmbedObject(obj) + c.l.context = enc.AppendObjectData(c.l.context, e.buf) + putEvent(e) + return c +} + +// Str adds the field key with val as a string to the logger context. +func (c Context) Str(key, val string) Context { + c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val) + return c +} + +// Strs adds the field key with val as a string to the logger context. +func (c Context) Strs(key string, vals []string) Context { + c.l.context = enc.AppendStrings(enc.AppendKey(c.l.context, key), vals) + return c +} + +// Stringer adds the field key with val.String() (or null if val is nil) to the logger context. 
+func (c Context) Stringer(key string, val fmt.Stringer) Context { + if val != nil { + c.l.context = enc.AppendString(enc.AppendKey(c.l.context, key), val.String()) + return c + } + + c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), nil) + return c +} + +// Bytes adds the field key with val as a []byte to the logger context. +func (c Context) Bytes(key string, val []byte) Context { + c.l.context = enc.AppendBytes(enc.AppendKey(c.l.context, key), val) + return c +} + +// Hex adds the field key with val as a hex string to the logger context. +func (c Context) Hex(key string, val []byte) Context { + c.l.context = enc.AppendHex(enc.AppendKey(c.l.context, key), val) + return c +} + +// RawJSON adds already encoded JSON to context. +// +// No sanity check is performed on b; it must not contain carriage returns and +// be valid JSON. +func (c Context) RawJSON(key string, b []byte) Context { + c.l.context = appendJSON(enc.AppendKey(c.l.context, key), b) + return c +} + +// AnErr adds the field key with serialized err to the logger context. +func (c Context) AnErr(key string, err error) Context { + switch m := ErrorMarshalFunc(err).(type) { + case nil: + return c + case LogObjectMarshaler: + return c.Object(key, m) + case error: + if m == nil || isNilValue(m) { + return c + } else { + return c.Str(key, m.Error()) + } + case string: + return c.Str(key, m) + default: + return c.Interface(key, m) + } +} + +// Errs adds the field key with errs as an array of serialized errors to the +// logger context. 
+func (c Context) Errs(key string, errs []error) Context { + arr := Arr() + for _, err := range errs { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + arr = arr.Object(m) + case error: + if m == nil || isNilValue(m) { + arr = arr.Interface(nil) + } else { + arr = arr.Str(m.Error()) + } + case string: + arr = arr.Str(m) + default: + arr = arr.Interface(m) + } + } + + return c.Array(key, arr) +} + +// Err adds the field "error" with serialized err to the logger context. +func (c Context) Err(err error) Context { + return c.AnErr(ErrorFieldName, err) +} + +// Bool adds the field key with val as a bool to the logger context. +func (c Context) Bool(key string, b bool) Context { + c.l.context = enc.AppendBool(enc.AppendKey(c.l.context, key), b) + return c +} + +// Bools adds the field key with val as a []bool to the logger context. +func (c Context) Bools(key string, b []bool) Context { + c.l.context = enc.AppendBools(enc.AppendKey(c.l.context, key), b) + return c +} + +// Int adds the field key with i as a int to the logger context. +func (c Context) Int(key string, i int) Context { + c.l.context = enc.AppendInt(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints adds the field key with i as a []int to the logger context. +func (c Context) Ints(key string, i []int) Context { + c.l.context = enc.AppendInts(enc.AppendKey(c.l.context, key), i) + return c +} + +// Int8 adds the field key with i as a int8 to the logger context. +func (c Context) Int8(key string, i int8) Context { + c.l.context = enc.AppendInt8(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints8 adds the field key with i as a []int8 to the logger context. +func (c Context) Ints8(key string, i []int8) Context { + c.l.context = enc.AppendInts8(enc.AppendKey(c.l.context, key), i) + return c +} + +// Int16 adds the field key with i as a int16 to the logger context. 
+func (c Context) Int16(key string, i int16) Context { + c.l.context = enc.AppendInt16(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints16 adds the field key with i as a []int16 to the logger context. +func (c Context) Ints16(key string, i []int16) Context { + c.l.context = enc.AppendInts16(enc.AppendKey(c.l.context, key), i) + return c +} + +// Int32 adds the field key with i as a int32 to the logger context. +func (c Context) Int32(key string, i int32) Context { + c.l.context = enc.AppendInt32(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints32 adds the field key with i as a []int32 to the logger context. +func (c Context) Ints32(key string, i []int32) Context { + c.l.context = enc.AppendInts32(enc.AppendKey(c.l.context, key), i) + return c +} + +// Int64 adds the field key with i as a int64 to the logger context. +func (c Context) Int64(key string, i int64) Context { + c.l.context = enc.AppendInt64(enc.AppendKey(c.l.context, key), i) + return c +} + +// Ints64 adds the field key with i as a []int64 to the logger context. +func (c Context) Ints64(key string, i []int64) Context { + c.l.context = enc.AppendInts64(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint adds the field key with i as a uint to the logger context. +func (c Context) Uint(key string, i uint) Context { + c.l.context = enc.AppendUint(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints adds the field key with i as a []uint to the logger context. +func (c Context) Uints(key string, i []uint) Context { + c.l.context = enc.AppendUints(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint8 adds the field key with i as a uint8 to the logger context. +func (c Context) Uint8(key string, i uint8) Context { + c.l.context = enc.AppendUint8(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints8 adds the field key with i as a []uint8 to the logger context. 
+func (c Context) Uints8(key string, i []uint8) Context { + c.l.context = enc.AppendUints8(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint16 adds the field key with i as a uint16 to the logger context. +func (c Context) Uint16(key string, i uint16) Context { + c.l.context = enc.AppendUint16(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints16 adds the field key with i as a []uint16 to the logger context. +func (c Context) Uints16(key string, i []uint16) Context { + c.l.context = enc.AppendUints16(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint32 adds the field key with i as a uint32 to the logger context. +func (c Context) Uint32(key string, i uint32) Context { + c.l.context = enc.AppendUint32(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints32 adds the field key with i as a []uint32 to the logger context. +func (c Context) Uints32(key string, i []uint32) Context { + c.l.context = enc.AppendUints32(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uint64 adds the field key with i as a uint64 to the logger context. +func (c Context) Uint64(key string, i uint64) Context { + c.l.context = enc.AppendUint64(enc.AppendKey(c.l.context, key), i) + return c +} + +// Uints64 adds the field key with i as a []uint64 to the logger context. +func (c Context) Uints64(key string, i []uint64) Context { + c.l.context = enc.AppendUints64(enc.AppendKey(c.l.context, key), i) + return c +} + +// Float32 adds the field key with f as a float32 to the logger context. +func (c Context) Float32(key string, f float32) Context { + c.l.context = enc.AppendFloat32(enc.AppendKey(c.l.context, key), f) + return c +} + +// Floats32 adds the field key with f as a []float32 to the logger context. +func (c Context) Floats32(key string, f []float32) Context { + c.l.context = enc.AppendFloats32(enc.AppendKey(c.l.context, key), f) + return c +} + +// Float64 adds the field key with f as a float64 to the logger context. 
+func (c Context) Float64(key string, f float64) Context { + c.l.context = enc.AppendFloat64(enc.AppendKey(c.l.context, key), f) + return c +} + +// Floats64 adds the field key with f as a []float64 to the logger context. +func (c Context) Floats64(key string, f []float64) Context { + c.l.context = enc.AppendFloats64(enc.AppendKey(c.l.context, key), f) + return c +} + +type timestampHook struct{} + +func (ts timestampHook) Run(e *Event, level Level, msg string) { + e.Timestamp() +} + +var th = timestampHook{} + +// Timestamp adds the current local time as UNIX timestamp to the logger context with the "time" key. +// To customize the key name, change zerolog.TimestampFieldName. +// +// NOTE: It won't dedupe the "time" key if the *Context has one already. +func (c Context) Timestamp() Context { + c.l = c.l.Hook(th) + return c +} + +// Time adds the field key with t formated as string using zerolog.TimeFieldFormat. +func (c Context) Time(key string, t time.Time) Context { + c.l.context = enc.AppendTime(enc.AppendKey(c.l.context, key), t, TimeFieldFormat) + return c +} + +// Times adds the field key with t formated as string using zerolog.TimeFieldFormat. +func (c Context) Times(key string, t []time.Time) Context { + c.l.context = enc.AppendTimes(enc.AppendKey(c.l.context, key), t, TimeFieldFormat) + return c +} + +// Dur adds the fields key with d divided by unit and stored as a float. +func (c Context) Dur(key string, d time.Duration) Context { + c.l.context = enc.AppendDuration(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger) + return c +} + +// Durs adds the fields key with d divided by unit and stored as a float. +func (c Context) Durs(key string, d []time.Duration) Context { + c.l.context = enc.AppendDurations(enc.AppendKey(c.l.context, key), d, DurationFieldUnit, DurationFieldInteger) + return c +} + +// Interface adds the field key with obj marshaled using reflection. 
+func (c Context) Interface(key string, i interface{}) Context { + c.l.context = enc.AppendInterface(enc.AppendKey(c.l.context, key), i) + return c +} + +type callerHook struct { + callerSkipFrameCount int +} + +func newCallerHook(skipFrameCount int) callerHook { + return callerHook{callerSkipFrameCount: skipFrameCount} +} + +func (ch callerHook) Run(e *Event, level Level, msg string) { + switch ch.callerSkipFrameCount { + case useGlobalSkipFrameCount: + // Extra frames to skip (added by hook infra). + e.caller(CallerSkipFrameCount + contextCallerSkipFrameCount) + default: + // Extra frames to skip (added by hook infra). + e.caller(ch.callerSkipFrameCount + contextCallerSkipFrameCount) + } +} + +// useGlobalSkipFrameCount acts as a flag to informat callerHook.Run +// to use the global CallerSkipFrameCount. +const useGlobalSkipFrameCount = math.MinInt32 + +// ch is the default caller hook using the global CallerSkipFrameCount. +var ch = newCallerHook(useGlobalSkipFrameCount) + +// Caller adds the file:line of the caller with the zerolog.CallerFieldName key. +func (c Context) Caller() Context { + c.l = c.l.Hook(ch) + return c +} + +// CallerWithSkipFrameCount adds the file:line of the caller with the zerolog.CallerFieldName key. +// The specified skipFrameCount int will override the global CallerSkipFrameCount for this context's respective logger. +// If set to -1 the global CallerSkipFrameCount will be used. +func (c Context) CallerWithSkipFrameCount(skipFrameCount int) Context { + c.l = c.l.Hook(newCallerHook(skipFrameCount)) + return c +} + +type stackTraceHook struct{} + +func (sh stackTraceHook) Run(e *Event, level Level, msg string) { + e.Stack() +} + +var sh = stackTraceHook{} + +// Stack enables stack trace printing for the error passed to Err(). 
+func (c Context) Stack() Context { + c.l = c.l.Hook(sh) + return c +} + +// IPAddr adds IPv4 or IPv6 Address to the context +func (c Context) IPAddr(key string, ip net.IP) Context { + c.l.context = enc.AppendIPAddr(enc.AppendKey(c.l.context, key), ip) + return c +} + +// IPPrefix adds IPv4 or IPv6 Prefix (address and mask) to the context +func (c Context) IPPrefix(key string, pfx net.IPNet) Context { + c.l.context = enc.AppendIPPrefix(enc.AppendKey(c.l.context, key), pfx) + return c +} + +// MACAddr adds MAC address to the context +func (c Context) MACAddr(key string, ha net.HardwareAddr) Context { + c.l.context = enc.AppendMACAddr(enc.AppendKey(c.l.context, key), ha) + return c +} diff --git a/vendor/github.com/rs/zerolog/ctx.go b/vendor/github.com/rs/zerolog/ctx.go new file mode 100644 index 00000000..ce18a32c --- /dev/null +++ b/vendor/github.com/rs/zerolog/ctx.go @@ -0,0 +1,48 @@ +package zerolog + +import ( + "context" +) + +var disabledLogger *Logger + +func init() { + SetGlobalLevel(TraceLevel) + l := Nop() + disabledLogger = &l +} + +type ctxKey struct{} + +// WithContext returns a copy of ctx with l associated. If an instance of Logger +// is already in the context, the context is not updated. +// +// For instance, to add a field to an existing logger in the context, use this +// notation: +// +// ctx := r.Context() +// l := zerolog.Ctx(ctx) +// l.UpdateContext(func(c Context) Context { +// return c.Str("bar", "baz") +// }) +func (l *Logger) WithContext(ctx context.Context) context.Context { + if lp, ok := ctx.Value(ctxKey{}).(*Logger); ok { + if lp == l { + // Do not store same logger. + return ctx + } + } else if l.level == Disabled { + // Do not store disabled logger. + return ctx + } + return context.WithValue(ctx, ctxKey{}, l) +} + +// Ctx returns the Logger associated with the ctx. If no logger +// is associated, a disabled logger is returned. 
+func Ctx(ctx context.Context) *Logger { + if l, ok := ctx.Value(ctxKey{}).(*Logger); ok { + return l + } + return disabledLogger +} diff --git a/vendor/github.com/rs/zerolog/encoder.go b/vendor/github.com/rs/zerolog/encoder.go new file mode 100644 index 00000000..09b24e80 --- /dev/null +++ b/vendor/github.com/rs/zerolog/encoder.go @@ -0,0 +1,56 @@ +package zerolog + +import ( + "net" + "time" +) + +type encoder interface { + AppendArrayDelim(dst []byte) []byte + AppendArrayEnd(dst []byte) []byte + AppendArrayStart(dst []byte) []byte + AppendBeginMarker(dst []byte) []byte + AppendBool(dst []byte, val bool) []byte + AppendBools(dst []byte, vals []bool) []byte + AppendBytes(dst, s []byte) []byte + AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte + AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte + AppendEndMarker(dst []byte) []byte + AppendFloat32(dst []byte, val float32) []byte + AppendFloat64(dst []byte, val float64) []byte + AppendFloats32(dst []byte, vals []float32) []byte + AppendFloats64(dst []byte, vals []float64) []byte + AppendHex(dst, s []byte) []byte + AppendIPAddr(dst []byte, ip net.IP) []byte + AppendIPPrefix(dst []byte, pfx net.IPNet) []byte + AppendInt(dst []byte, val int) []byte + AppendInt16(dst []byte, val int16) []byte + AppendInt32(dst []byte, val int32) []byte + AppendInt64(dst []byte, val int64) []byte + AppendInt8(dst []byte, val int8) []byte + AppendInterface(dst []byte, i interface{}) []byte + AppendInts(dst []byte, vals []int) []byte + AppendInts16(dst []byte, vals []int16) []byte + AppendInts32(dst []byte, vals []int32) []byte + AppendInts64(dst []byte, vals []int64) []byte + AppendInts8(dst []byte, vals []int8) []byte + AppendKey(dst []byte, key string) []byte + AppendLineBreak(dst []byte) []byte + AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte + AppendNil(dst []byte) []byte + AppendObjectData(dst []byte, o []byte) []byte + AppendString(dst []byte, s string) 
[]byte + AppendStrings(dst []byte, vals []string) []byte + AppendTime(dst []byte, t time.Time, format string) []byte + AppendTimes(dst []byte, vals []time.Time, format string) []byte + AppendUint(dst []byte, val uint) []byte + AppendUint16(dst []byte, val uint16) []byte + AppendUint32(dst []byte, val uint32) []byte + AppendUint64(dst []byte, val uint64) []byte + AppendUint8(dst []byte, val uint8) []byte + AppendUints(dst []byte, vals []uint) []byte + AppendUints16(dst []byte, vals []uint16) []byte + AppendUints32(dst []byte, vals []uint32) []byte + AppendUints64(dst []byte, vals []uint64) []byte + AppendUints8(dst []byte, vals []uint8) []byte +} diff --git a/vendor/github.com/rs/zerolog/encoder_cbor.go b/vendor/github.com/rs/zerolog/encoder_cbor.go new file mode 100644 index 00000000..f8d3fe9e --- /dev/null +++ b/vendor/github.com/rs/zerolog/encoder_cbor.go @@ -0,0 +1,35 @@ +// +build binary_log + +package zerolog + +// This file contains bindings to do binary encoding. + +import ( + "github.com/rs/zerolog/internal/cbor" +) + +var ( + _ encoder = (*cbor.Encoder)(nil) + + enc = cbor.Encoder{} +) + +func appendJSON(dst []byte, j []byte) []byte { + return cbor.AppendEmbeddedJSON(dst, j) +} + +// decodeIfBinaryToString - converts a binary formatted log msg to a +// JSON formatted String Log message. +func decodeIfBinaryToString(in []byte) string { + return cbor.DecodeIfBinaryToString(in) +} + +func decodeObjectToStr(in []byte) string { + return cbor.DecodeObjectToStr(in) +} + +// decodeIfBinaryToBytes - converts a binary formatted log msg to a +// JSON formatted Bytes Log message. 
+func decodeIfBinaryToBytes(in []byte) []byte { + return cbor.DecodeIfBinaryToBytes(in) +} diff --git a/vendor/github.com/rs/zerolog/encoder_json.go b/vendor/github.com/rs/zerolog/encoder_json.go new file mode 100644 index 00000000..fe580f5f --- /dev/null +++ b/vendor/github.com/rs/zerolog/encoder_json.go @@ -0,0 +1,32 @@ +// +build !binary_log + +package zerolog + +// encoder_json.go file contains bindings to generate +// JSON encoded byte stream. + +import ( + "github.com/rs/zerolog/internal/json" +) + +var ( + _ encoder = (*json.Encoder)(nil) + + enc = json.Encoder{} +) + +func appendJSON(dst []byte, j []byte) []byte { + return append(dst, j...) +} + +func decodeIfBinaryToString(in []byte) string { + return string(in) +} + +func decodeObjectToStr(in []byte) string { + return string(in) +} + +func decodeIfBinaryToBytes(in []byte) []byte { + return in +} diff --git a/vendor/github.com/rs/zerolog/event.go b/vendor/github.com/rs/zerolog/event.go new file mode 100644 index 00000000..f1829bea --- /dev/null +++ b/vendor/github.com/rs/zerolog/event.go @@ -0,0 +1,736 @@ +package zerolog + +import ( + "fmt" + "net" + "os" + "runtime" + "sync" + "time" +) + +var eventPool = &sync.Pool{ + New: func() interface{} { + return &Event{ + buf: make([]byte, 0, 500), + } + }, +} + +// Event represents a log event. It is instanced by one of the level method of +// Logger and finalized by the Msg or Msgf method. +type Event struct { + buf []byte + w LevelWriter + level Level + done func(msg string) + stack bool // enable error stack trace + ch []Hook // hooks from context +} + +func putEvent(e *Event) { + // Proper usage of a sync.Pool requires each entry to have approximately + // the same memory cost. To obtain this property when the stored type + // contains a variably-sized buffer, we add a hard limit on the maximum buffer + // to place back in the pool. 
+ // + // See https://golang.org/issue/23199 + const maxSize = 1 << 16 // 64KiB + if cap(e.buf) > maxSize { + return + } + eventPool.Put(e) +} + +// LogObjectMarshaler provides a strongly-typed and encoding-agnostic interface +// to be implemented by types used with Event/Context's Object methods. +type LogObjectMarshaler interface { + MarshalZerologObject(e *Event) +} + +// LogArrayMarshaler provides a strongly-typed and encoding-agnostic interface +// to be implemented by types used with Event/Context's Array methods. +type LogArrayMarshaler interface { + MarshalZerologArray(a *Array) +} + +func newEvent(w LevelWriter, level Level) *Event { + e := eventPool.Get().(*Event) + e.buf = e.buf[:0] + e.ch = nil + e.buf = enc.AppendBeginMarker(e.buf) + e.w = w + e.level = level + e.stack = false + return e +} + +func (e *Event) write() (err error) { + if e == nil { + return nil + } + if e.level != Disabled { + e.buf = enc.AppendEndMarker(e.buf) + e.buf = enc.AppendLineBreak(e.buf) + if e.w != nil { + _, err = e.w.WriteLevel(e.level, e.buf) + } + } + putEvent(e) + return +} + +// Enabled return false if the *Event is going to be filtered out by +// log level or sampling. +func (e *Event) Enabled() bool { + return e != nil && e.level != Disabled +} + +// Discard disables the event so Msg(f) won't print it. +func (e *Event) Discard() *Event { + if e == nil { + return e + } + e.level = Disabled + return nil +} + +// Msg sends the *Event with msg added as the message field if not empty. +// +// NOTICE: once this method is called, the *Event should be disposed. +// Calling Msg twice can have unexpected result. +func (e *Event) Msg(msg string) { + if e == nil { + return + } + e.msg(msg) +} + +// Send is equivalent to calling Msg(""). +// +// NOTICE: once this method is called, the *Event should be disposed. +func (e *Event) Send() { + if e == nil { + return + } + e.msg("") +} + +// Msgf sends the event with formatted msg added as the message field if not empty. 
+// +// NOTICE: once this method is called, the *Event should be disposed. +// Calling Msgf twice can have unexpected result. +func (e *Event) Msgf(format string, v ...interface{}) { + if e == nil { + return + } + e.msg(fmt.Sprintf(format, v...)) +} + +func (e *Event) msg(msg string) { + for _, hook := range e.ch { + hook.Run(e, e.level, msg) + } + if msg != "" { + e.buf = enc.AppendString(enc.AppendKey(e.buf, MessageFieldName), msg) + } + if e.done != nil { + defer e.done(msg) + } + if err := e.write(); err != nil { + if ErrorHandler != nil { + ErrorHandler(err) + } else { + fmt.Fprintf(os.Stderr, "zerolog: could not write event: %v\n", err) + } + } +} + +// Fields is a helper function to use a map to set fields using type assertion. +func (e *Event) Fields(fields map[string]interface{}) *Event { + if e == nil { + return e + } + e.buf = appendFields(e.buf, fields) + return e +} + +// Dict adds the field key with a dict to the event context. +// Use zerolog.Dict() to create the dictionary. +func (e *Event) Dict(key string, dict *Event) *Event { + if e == nil { + return e + } + dict.buf = enc.AppendEndMarker(dict.buf) + e.buf = append(enc.AppendKey(e.buf, key), dict.buf...) + putEvent(dict) + return e +} + +// Dict creates an Event to be used with the *Event.Dict method. +// Call usual field methods like Str, Int etc to add fields to this +// event and give it as argument the *Event.Dict method. +func Dict() *Event { + return newEvent(nil, 0) +} + +// Array adds the field key with an array to the event context. +// Use zerolog.Arr() to create the array or pass a type that +// implement the LogArrayMarshaler interface. 
+func (e *Event) Array(key string, arr LogArrayMarshaler) *Event { + if e == nil { + return e + } + e.buf = enc.AppendKey(e.buf, key) + var a *Array + if aa, ok := arr.(*Array); ok { + a = aa + } else { + a = Arr() + arr.MarshalZerologArray(a) + } + e.buf = a.write(e.buf) + return e +} + +func (e *Event) appendObject(obj LogObjectMarshaler) { + e.buf = enc.AppendBeginMarker(e.buf) + obj.MarshalZerologObject(e) + e.buf = enc.AppendEndMarker(e.buf) +} + +// Object marshals an object that implement the LogObjectMarshaler interface. +func (e *Event) Object(key string, obj LogObjectMarshaler) *Event { + if e == nil { + return e + } + e.buf = enc.AppendKey(e.buf, key) + e.appendObject(obj) + return e +} + +// EmbedObject marshals an object that implement the LogObjectMarshaler interface. +func (e *Event) EmbedObject(obj LogObjectMarshaler) *Event { + if e == nil { + return e + } + obj.MarshalZerologObject(e) + return e +} + +// Str adds the field key with val as a string to the *Event context. +func (e *Event) Str(key, val string) *Event { + if e == nil { + return e + } + e.buf = enc.AppendString(enc.AppendKey(e.buf, key), val) + return e +} + +// Strs adds the field key with vals as a []string to the *Event context. +func (e *Event) Strs(key string, vals []string) *Event { + if e == nil { + return e + } + e.buf = enc.AppendStrings(enc.AppendKey(e.buf, key), vals) + return e +} + +// Stringer adds the field key with val.String() (or null if val is nil) to the *Event context. +func (e *Event) Stringer(key string, val fmt.Stringer) *Event { + if e == nil { + return e + } + + if val != nil { + e.buf = enc.AppendString(enc.AppendKey(e.buf, key), val.String()) + return e + } + + e.buf = enc.AppendInterface(enc.AppendKey(e.buf, key), nil) + return e +} + +// Bytes adds the field key with val as a string to the *Event context. +// +// Runes outside of normal ASCII ranges will be hex-encoded in the resulting +// JSON. 
+func (e *Event) Bytes(key string, val []byte) *Event { + if e == nil { + return e + } + e.buf = enc.AppendBytes(enc.AppendKey(e.buf, key), val) + return e +} + +// Hex adds the field key with val as a hex string to the *Event context. +func (e *Event) Hex(key string, val []byte) *Event { + if e == nil { + return e + } + e.buf = enc.AppendHex(enc.AppendKey(e.buf, key), val) + return e +} + +// RawJSON adds already encoded JSON to the log line under key. +// +// No sanity check is performed on b; it must not contain carriage returns and +// be valid JSON. +func (e *Event) RawJSON(key string, b []byte) *Event { + if e == nil { + return e + } + e.buf = appendJSON(enc.AppendKey(e.buf, key), b) + return e +} + +// AnErr adds the field key with serialized err to the *Event context. +// If err is nil, no field is added. +func (e *Event) AnErr(key string, err error) *Event { + if e == nil { + return e + } + switch m := ErrorMarshalFunc(err).(type) { + case nil: + return e + case LogObjectMarshaler: + return e.Object(key, m) + case error: + if m == nil || isNilValue(m) { + return e + } else { + return e.Str(key, m.Error()) + } + case string: + return e.Str(key, m) + default: + return e.Interface(key, m) + } +} + +// Errs adds the field key with errs as an array of serialized errors to the +// *Event context. +func (e *Event) Errs(key string, errs []error) *Event { + if e == nil { + return e + } + arr := Arr() + for _, err := range errs { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + arr = arr.Object(m) + case error: + arr = arr.Err(m) + case string: + arr = arr.Str(m) + default: + arr = arr.Interface(m) + } + } + + return e.Array(key, arr) +} + +// Err adds the field "error" with serialized err to the *Event context. +// If err is nil, no field is added. +// +// To customize the key name, change zerolog.ErrorFieldName. 
+// +// If Stack() has been called before and zerolog.ErrorStackMarshaler is defined, +// the err is passed to ErrorStackMarshaler and the result is appended to the +// zerolog.ErrorStackFieldName. +func (e *Event) Err(err error) *Event { + if e == nil { + return e + } + if e.stack && ErrorStackMarshaler != nil { + switch m := ErrorStackMarshaler(err).(type) { + case nil: + case LogObjectMarshaler: + e.Object(ErrorStackFieldName, m) + case error: + if m != nil && !isNilValue(m) { + e.Str(ErrorStackFieldName, m.Error()) + } + case string: + e.Str(ErrorStackFieldName, m) + default: + e.Interface(ErrorStackFieldName, m) + } + } + return e.AnErr(ErrorFieldName, err) +} + +// Stack enables stack trace printing for the error passed to Err(). +// +// ErrorStackMarshaler must be set for this method to do something. +func (e *Event) Stack() *Event { + if e != nil { + e.stack = true + } + return e +} + +// Bool adds the field key with val as a bool to the *Event context. +func (e *Event) Bool(key string, b bool) *Event { + if e == nil { + return e + } + e.buf = enc.AppendBool(enc.AppendKey(e.buf, key), b) + return e +} + +// Bools adds the field key with val as a []bool to the *Event context. +func (e *Event) Bools(key string, b []bool) *Event { + if e == nil { + return e + } + e.buf = enc.AppendBools(enc.AppendKey(e.buf, key), b) + return e +} + +// Int adds the field key with i as a int to the *Event context. +func (e *Event) Int(key string, i int) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints adds the field key with i as a []int to the *Event context. +func (e *Event) Ints(key string, i []int) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts(enc.AppendKey(e.buf, key), i) + return e +} + +// Int8 adds the field key with i as a int8 to the *Event context. 
+func (e *Event) Int8(key string, i int8) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt8(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints8 adds the field key with i as a []int8 to the *Event context. +func (e *Event) Ints8(key string, i []int8) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts8(enc.AppendKey(e.buf, key), i) + return e +} + +// Int16 adds the field key with i as a int16 to the *Event context. +func (e *Event) Int16(key string, i int16) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt16(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints16 adds the field key with i as a []int16 to the *Event context. +func (e *Event) Ints16(key string, i []int16) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts16(enc.AppendKey(e.buf, key), i) + return e +} + +// Int32 adds the field key with i as a int32 to the *Event context. +func (e *Event) Int32(key string, i int32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt32(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints32 adds the field key with i as a []int32 to the *Event context. +func (e *Event) Ints32(key string, i []int32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts32(enc.AppendKey(e.buf, key), i) + return e +} + +// Int64 adds the field key with i as a int64 to the *Event context. +func (e *Event) Int64(key string, i int64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInt64(enc.AppendKey(e.buf, key), i) + return e +} + +// Ints64 adds the field key with i as a []int64 to the *Event context. +func (e *Event) Ints64(key string, i []int64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendInts64(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint adds the field key with i as a uint to the *Event context. 
+func (e *Event) Uint(key string, i uint) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints adds the field key with i as a []int to the *Event context. +func (e *Event) Uints(key string, i []uint) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint8 adds the field key with i as a uint8 to the *Event context. +func (e *Event) Uint8(key string, i uint8) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint8(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints8 adds the field key with i as a []int8 to the *Event context. +func (e *Event) Uints8(key string, i []uint8) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints8(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint16 adds the field key with i as a uint16 to the *Event context. +func (e *Event) Uint16(key string, i uint16) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint16(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints16 adds the field key with i as a []int16 to the *Event context. +func (e *Event) Uints16(key string, i []uint16) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints16(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint32 adds the field key with i as a uint32 to the *Event context. +func (e *Event) Uint32(key string, i uint32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint32(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints32 adds the field key with i as a []int32 to the *Event context. +func (e *Event) Uints32(key string, i []uint32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints32(enc.AppendKey(e.buf, key), i) + return e +} + +// Uint64 adds the field key with i as a uint64 to the *Event context. 
+func (e *Event) Uint64(key string, i uint64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUint64(enc.AppendKey(e.buf, key), i) + return e +} + +// Uints64 adds the field key with i as a []int64 to the *Event context. +func (e *Event) Uints64(key string, i []uint64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendUints64(enc.AppendKey(e.buf, key), i) + return e +} + +// Float32 adds the field key with f as a float32 to the *Event context. +func (e *Event) Float32(key string, f float32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendFloat32(enc.AppendKey(e.buf, key), f) + return e +} + +// Floats32 adds the field key with f as a []float32 to the *Event context. +func (e *Event) Floats32(key string, f []float32) *Event { + if e == nil { + return e + } + e.buf = enc.AppendFloats32(enc.AppendKey(e.buf, key), f) + return e +} + +// Float64 adds the field key with f as a float64 to the *Event context. +func (e *Event) Float64(key string, f float64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendFloat64(enc.AppendKey(e.buf, key), f) + return e +} + +// Floats64 adds the field key with f as a []float64 to the *Event context. +func (e *Event) Floats64(key string, f []float64) *Event { + if e == nil { + return e + } + e.buf = enc.AppendFloats64(enc.AppendKey(e.buf, key), f) + return e +} + +// Timestamp adds the current local time as UNIX timestamp to the *Event context with the "time" key. +// To customize the key name, change zerolog.TimestampFieldName. +// +// NOTE: It won't dedupe the "time" key if the *Event (or *Context) has one +// already. +func (e *Event) Timestamp() *Event { + if e == nil { + return e + } + e.buf = enc.AppendTime(enc.AppendKey(e.buf, TimestampFieldName), TimestampFunc(), TimeFieldFormat) + return e +} + +// Time adds the field key with t formated as string using zerolog.TimeFieldFormat. 
+func (e *Event) Time(key string, t time.Time) *Event { + if e == nil { + return e + } + e.buf = enc.AppendTime(enc.AppendKey(e.buf, key), t, TimeFieldFormat) + return e +} + +// Times adds the field key with t formated as string using zerolog.TimeFieldFormat. +func (e *Event) Times(key string, t []time.Time) *Event { + if e == nil { + return e + } + e.buf = enc.AppendTimes(enc.AppendKey(e.buf, key), t, TimeFieldFormat) + return e +} + +// Dur adds the field key with duration d stored as zerolog.DurationFieldUnit. +// If zerolog.DurationFieldInteger is true, durations are rendered as integer +// instead of float. +func (e *Event) Dur(key string, d time.Duration) *Event { + if e == nil { + return e + } + e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) + return e +} + +// Durs adds the field key with duration d stored as zerolog.DurationFieldUnit. +// If zerolog.DurationFieldInteger is true, durations are rendered as integer +// instead of float. +func (e *Event) Durs(key string, d []time.Duration) *Event { + if e == nil { + return e + } + e.buf = enc.AppendDurations(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) + return e +} + +// TimeDiff adds the field key with positive duration between time t and start. +// If time t is not greater than start, duration will be 0. +// Duration format follows the same principle as Dur(). +func (e *Event) TimeDiff(key string, t time.Time, start time.Time) *Event { + if e == nil { + return e + } + var d time.Duration + if t.After(start) { + d = t.Sub(start) + } + e.buf = enc.AppendDuration(enc.AppendKey(e.buf, key), d, DurationFieldUnit, DurationFieldInteger) + return e +} + +// Interface adds the field key with i marshaled using reflection. 
+func (e *Event) Interface(key string, i interface{}) *Event { + if e == nil { + return e + } + if obj, ok := i.(LogObjectMarshaler); ok { + return e.Object(key, obj) + } + e.buf = enc.AppendInterface(enc.AppendKey(e.buf, key), i) + return e +} + +// Caller adds the file:line of the caller with the zerolog.CallerFieldName key. +// The argument skip is the number of stack frames to ascend +// Skip If not passed, use the global variable CallerSkipFrameCount +func (e *Event) Caller(skip ...int) *Event { + sk := CallerSkipFrameCount + if len(skip) > 0 { + sk = skip[0] + CallerSkipFrameCount + } + return e.caller(sk) +} + +func (e *Event) caller(skip int) *Event { + if e == nil { + return e + } + _, file, line, ok := runtime.Caller(skip) + if !ok { + return e + } + e.buf = enc.AppendString(enc.AppendKey(e.buf, CallerFieldName), CallerMarshalFunc(file, line)) + return e +} + +// IPAddr adds IPv4 or IPv6 Address to the event +func (e *Event) IPAddr(key string, ip net.IP) *Event { + if e == nil { + return e + } + e.buf = enc.AppendIPAddr(enc.AppendKey(e.buf, key), ip) + return e +} + +// IPPrefix adds IPv4 or IPv6 Prefix (address and mask) to the event +func (e *Event) IPPrefix(key string, pfx net.IPNet) *Event { + if e == nil { + return e + } + e.buf = enc.AppendIPPrefix(enc.AppendKey(e.buf, key), pfx) + return e +} + +// MACAddr adds MAC address to the event +func (e *Event) MACAddr(key string, ha net.HardwareAddr) *Event { + if e == nil { + return e + } + e.buf = enc.AppendMACAddr(enc.AppendKey(e.buf, key), ha) + return e +} diff --git a/vendor/github.com/rs/zerolog/fields.go b/vendor/github.com/rs/zerolog/fields.go new file mode 100644 index 00000000..cf3c3e91 --- /dev/null +++ b/vendor/github.com/rs/zerolog/fields.go @@ -0,0 +1,253 @@ +package zerolog + +import ( + "net" + "sort" + "time" + "unsafe" +) + +func isNilValue(i interface{}) bool { + return (*[2]uintptr)(unsafe.Pointer(&i))[1] == 0 +} + +func appendFields(dst []byte, fields map[string]interface{}) []byte { 
+ keys := make([]string, 0, len(fields)) + for key := range fields { + keys = append(keys, key) + } + sort.Strings(keys) + for _, key := range keys { + dst = enc.AppendKey(dst, key) + val := fields[key] + if val, ok := val.(LogObjectMarshaler); ok { + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(val) + dst = append(dst, e.buf...) + putEvent(e) + continue + } + switch val := val.(type) { + case string: + dst = enc.AppendString(dst, val) + case []byte: + dst = enc.AppendBytes(dst, val) + case error: + switch m := ErrorMarshalFunc(val).(type) { + case LogObjectMarshaler: + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(m) + dst = append(dst, e.buf...) + putEvent(e) + case error: + if m == nil || isNilValue(m) { + dst = enc.AppendNil(dst) + } else { + dst = enc.AppendString(dst, m.Error()) + } + case string: + dst = enc.AppendString(dst, m) + default: + dst = enc.AppendInterface(dst, m) + } + case []error: + dst = enc.AppendArrayStart(dst) + for i, err := range val { + switch m := ErrorMarshalFunc(err).(type) { + case LogObjectMarshaler: + e := newEvent(nil, 0) + e.buf = e.buf[:0] + e.appendObject(m) + dst = append(dst, e.buf...) 
+ putEvent(e) + case error: + if m == nil || isNilValue(m) { + dst = enc.AppendNil(dst) + } else { + dst = enc.AppendString(dst, m.Error()) + } + case string: + dst = enc.AppendString(dst, m) + default: + dst = enc.AppendInterface(dst, m) + } + + if i < (len(val) - 1) { + enc.AppendArrayDelim(dst) + } + } + dst = enc.AppendArrayEnd(dst) + case bool: + dst = enc.AppendBool(dst, val) + case int: + dst = enc.AppendInt(dst, val) + case int8: + dst = enc.AppendInt8(dst, val) + case int16: + dst = enc.AppendInt16(dst, val) + case int32: + dst = enc.AppendInt32(dst, val) + case int64: + dst = enc.AppendInt64(dst, val) + case uint: + dst = enc.AppendUint(dst, val) + case uint8: + dst = enc.AppendUint8(dst, val) + case uint16: + dst = enc.AppendUint16(dst, val) + case uint32: + dst = enc.AppendUint32(dst, val) + case uint64: + dst = enc.AppendUint64(dst, val) + case float32: + dst = enc.AppendFloat32(dst, val) + case float64: + dst = enc.AppendFloat64(dst, val) + case time.Time: + dst = enc.AppendTime(dst, val, TimeFieldFormat) + case time.Duration: + dst = enc.AppendDuration(dst, val, DurationFieldUnit, DurationFieldInteger) + case *string: + if val != nil { + dst = enc.AppendString(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *bool: + if val != nil { + dst = enc.AppendBool(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int: + if val != nil { + dst = enc.AppendInt(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int8: + if val != nil { + dst = enc.AppendInt8(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int16: + if val != nil { + dst = enc.AppendInt16(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int32: + if val != nil { + dst = enc.AppendInt32(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *int64: + if val != nil { + dst = enc.AppendInt64(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint: + if val != nil { + dst = enc.AppendUint(dst, *val) + } else { + dst = 
enc.AppendNil(dst) + } + case *uint8: + if val != nil { + dst = enc.AppendUint8(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint16: + if val != nil { + dst = enc.AppendUint16(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint32: + if val != nil { + dst = enc.AppendUint32(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *uint64: + if val != nil { + dst = enc.AppendUint64(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *float32: + if val != nil { + dst = enc.AppendFloat32(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *float64: + if val != nil { + dst = enc.AppendFloat64(dst, *val) + } else { + dst = enc.AppendNil(dst) + } + case *time.Time: + if val != nil { + dst = enc.AppendTime(dst, *val, TimeFieldFormat) + } else { + dst = enc.AppendNil(dst) + } + case *time.Duration: + if val != nil { + dst = enc.AppendDuration(dst, *val, DurationFieldUnit, DurationFieldInteger) + } else { + dst = enc.AppendNil(dst) + } + case []string: + dst = enc.AppendStrings(dst, val) + case []bool: + dst = enc.AppendBools(dst, val) + case []int: + dst = enc.AppendInts(dst, val) + case []int8: + dst = enc.AppendInts8(dst, val) + case []int16: + dst = enc.AppendInts16(dst, val) + case []int32: + dst = enc.AppendInts32(dst, val) + case []int64: + dst = enc.AppendInts64(dst, val) + case []uint: + dst = enc.AppendUints(dst, val) + // case []uint8: + // dst = enc.AppendUints8(dst, val) + case []uint16: + dst = enc.AppendUints16(dst, val) + case []uint32: + dst = enc.AppendUints32(dst, val) + case []uint64: + dst = enc.AppendUints64(dst, val) + case []float32: + dst = enc.AppendFloats32(dst, val) + case []float64: + dst = enc.AppendFloats64(dst, val) + case []time.Time: + dst = enc.AppendTimes(dst, val, TimeFieldFormat) + case []time.Duration: + dst = enc.AppendDurations(dst, val, DurationFieldUnit, DurationFieldInteger) + case nil: + dst = enc.AppendNil(dst) + case net.IP: + dst = enc.AppendIPAddr(dst, val) + case net.IPNet: + 
dst = enc.AppendIPPrefix(dst, val) + case net.HardwareAddr: + dst = enc.AppendMACAddr(dst, val) + default: + dst = enc.AppendInterface(dst, val) + } + } + return dst +} diff --git a/vendor/github.com/rs/zerolog/globals.go b/vendor/github.com/rs/zerolog/globals.go new file mode 100644 index 00000000..421429a5 --- /dev/null +++ b/vendor/github.com/rs/zerolog/globals.go @@ -0,0 +1,114 @@ +package zerolog + +import ( + "strconv" + "sync/atomic" + "time" +) + +const ( + // TimeFormatUnix defines a time format that makes time fields to be + // serialized as Unix timestamp integers. + TimeFormatUnix = "" + + // TimeFormatUnixMs defines a time format that makes time fields to be + // serialized as Unix timestamp integers in milliseconds. + TimeFormatUnixMs = "UNIXMS" + + // TimeFormatUnixMicro defines a time format that makes time fields to be + // serialized as Unix timestamp integers in microseconds. + TimeFormatUnixMicro = "UNIXMICRO" +) + +var ( + // TimestampFieldName is the field name used for the timestamp field. + TimestampFieldName = "time" + + // LevelFieldName is the field name used for the level field. + LevelFieldName = "level" + + // LevelFieldMarshalFunc allows customization of global level field marshaling + LevelFieldMarshalFunc = func(l Level) string { + return l.String() + } + + // MessageFieldName is the field name used for the message field. + MessageFieldName = "message" + + // ErrorFieldName is the field name used for error fields. + ErrorFieldName = "error" + + // CallerFieldName is the field name used for caller field. + CallerFieldName = "caller" + + // CallerSkipFrameCount is the number of stack frames to skip to find the caller. + CallerSkipFrameCount = 2 + + // CallerMarshalFunc allows customization of global caller marshaling + CallerMarshalFunc = func(file string, line int) string { + return file + ":" + strconv.Itoa(line) + } + + // ErrorStackFieldName is the field name used for error stacks. 
+ ErrorStackFieldName = "stack" + + // ErrorStackMarshaler extract the stack from err if any. + ErrorStackMarshaler func(err error) interface{} + + // ErrorMarshalFunc allows customization of global error marshaling + ErrorMarshalFunc = func(err error) interface{} { + return err + } + + // TimeFieldFormat defines the time format of the Time field type. If set to + // TimeFormatUnix, TimeFormatUnixMs or TimeFormatUnixMicro, the time is formatted as an UNIX + // timestamp as integer. + TimeFieldFormat = time.RFC3339 + + // TimestampFunc defines the function called to generate a timestamp. + TimestampFunc = time.Now + + // DurationFieldUnit defines the unit for time.Duration type fields added + // using the Dur method. + DurationFieldUnit = time.Millisecond + + // DurationFieldInteger renders Dur fields as integer instead of float if + // set to true. + DurationFieldInteger = false + + // ErrorHandler is called whenever zerolog fails to write an event on its + // output. If not set, an error is printed on the stderr. This handler must + // be thread safe and non-blocking. + ErrorHandler func(err error) +) + +var ( + gLevel = new(int32) + disableSampling = new(int32) +) + +// SetGlobalLevel sets the global override for log level. If this +// values is raised, all Loggers will use at least this value. +// +// To globally disable logs, set GlobalLevel to Disabled. +func SetGlobalLevel(l Level) { + atomic.StoreInt32(gLevel, int32(l)) +} + +// GlobalLevel returns the current global log level +func GlobalLevel() Level { + return Level(atomic.LoadInt32(gLevel)) +} + +// DisableSampling will disable sampling in all Loggers if true. 
+func DisableSampling(v bool) { + var i int32 + if v { + i = 1 + } + atomic.StoreInt32(disableSampling, i) +} + +func samplingDisabled() bool { + return atomic.LoadInt32(disableSampling) == 1 +} diff --git a/vendor/github.com/rs/zerolog/go.mod b/vendor/github.com/rs/zerolog/go.mod new file mode 100644 index 00000000..340ed40e --- /dev/null +++ b/vendor/github.com/rs/zerolog/go.mod @@ -0,0 +1,8 @@ +module github.com/rs/zerolog + +require ( + github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e + github.com/pkg/errors v0.8.1 + github.com/rs/xid v1.2.1 + golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74 +) diff --git a/vendor/github.com/rs/zerolog/go.sum b/vendor/github.com/rs/zerolog/go.sum new file mode 100644 index 00000000..13b9b2c3 --- /dev/null +++ b/vendor/github.com/rs/zerolog/go.sum @@ -0,0 +1,14 @@ +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74 
h1:4cFkmztxtMslUX2SctSl+blCyXfpzhGOy9LhKAqSMA4= +golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/vendor/github.com/rs/zerolog/go112.go b/vendor/github.com/rs/zerolog/go112.go new file mode 100644 index 00000000..e7b5a1bd --- /dev/null +++ b/vendor/github.com/rs/zerolog/go112.go @@ -0,0 +1,7 @@ +// +build go1.12 + +package zerolog + +// Since go 1.12, some auto generated init functions are hidden from +// runtime.Caller. +const contextCallerSkipFrameCount = 2 diff --git a/vendor/github.com/rs/zerolog/hook.go b/vendor/github.com/rs/zerolog/hook.go new file mode 100644 index 00000000..ec6effc1 --- /dev/null +++ b/vendor/github.com/rs/zerolog/hook.go @@ -0,0 +1,64 @@ +package zerolog + +// Hook defines an interface to a log hook. +type Hook interface { + // Run runs the hook with the event. + Run(e *Event, level Level, message string) +} + +// HookFunc is an adaptor to allow the use of an ordinary function +// as a Hook. +type HookFunc func(e *Event, level Level, message string) + +// Run implements the Hook interface. +func (h HookFunc) Run(e *Event, level Level, message string) { + h(e, level, message) +} + +// LevelHook applies a different hook for each level. +type LevelHook struct { + NoLevelHook, TraceHook, DebugHook, InfoHook, WarnHook, ErrorHook, FatalHook, PanicHook Hook +} + +// Run implements the Hook interface. 
+func (h LevelHook) Run(e *Event, level Level, message string) { + switch level { + case TraceLevel: + if h.TraceHook != nil { + h.TraceHook.Run(e, level, message) + } + case DebugLevel: + if h.DebugHook != nil { + h.DebugHook.Run(e, level, message) + } + case InfoLevel: + if h.InfoHook != nil { + h.InfoHook.Run(e, level, message) + } + case WarnLevel: + if h.WarnHook != nil { + h.WarnHook.Run(e, level, message) + } + case ErrorLevel: + if h.ErrorHook != nil { + h.ErrorHook.Run(e, level, message) + } + case FatalLevel: + if h.FatalHook != nil { + h.FatalHook.Run(e, level, message) + } + case PanicLevel: + if h.PanicHook != nil { + h.PanicHook.Run(e, level, message) + } + case NoLevel: + if h.NoLevelHook != nil { + h.NoLevelHook.Run(e, level, message) + } + } +} + +// NewLevelHook returns a new LevelHook. +func NewLevelHook() LevelHook { + return LevelHook{} +} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/README.md b/vendor/github.com/rs/zerolog/internal/cbor/README.md new file mode 100644 index 00000000..92c2e8c7 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/README.md @@ -0,0 +1,56 @@ +## Reference: + CBOR Encoding is described in [RFC7049](https://tools.ietf.org/html/rfc7049) + +## Comparison of JSON vs CBOR + +Two main areas of reduction are: + +1. CPU usage to write a log msg +2. Size (in bytes) of log messages. 
+ + +CPU Usage savings are below: +``` +name JSON time/op CBOR time/op delta +Info-32 15.3ns ± 1% 11.7ns ± 3% -23.78% (p=0.000 n=9+10) +ContextFields-32 16.2ns ± 2% 12.3ns ± 3% -23.97% (p=0.000 n=9+9) +ContextAppend-32 6.70ns ± 0% 6.20ns ± 0% -7.44% (p=0.000 n=9+9) +LogFields-32 66.4ns ± 0% 24.6ns ± 2% -62.89% (p=0.000 n=10+9) +LogArrayObject-32 911ns ±11% 768ns ± 6% -15.64% (p=0.000 n=10+10) +LogFieldType/Floats-32 70.3ns ± 2% 29.5ns ± 1% -57.98% (p=0.000 n=10+10) +LogFieldType/Err-32 14.0ns ± 3% 12.1ns ± 8% -13.20% (p=0.000 n=8+10) +LogFieldType/Dur-32 17.2ns ± 2% 13.1ns ± 1% -24.27% (p=0.000 n=10+9) +LogFieldType/Object-32 54.3ns ±11% 52.3ns ± 7% ~ (p=0.239 n=10+10) +LogFieldType/Ints-32 20.3ns ± 2% 15.1ns ± 2% -25.50% (p=0.000 n=9+10) +LogFieldType/Interfaces-32 642ns ±11% 621ns ± 9% ~ (p=0.118 n=10+10) +LogFieldType/Interface(Objects)-32 635ns ±13% 632ns ± 9% ~ (p=0.592 n=10+10) +LogFieldType/Times-32 294ns ± 0% 27ns ± 1% -90.71% (p=0.000 n=10+9) +LogFieldType/Durs-32 121ns ± 0% 33ns ± 2% -72.44% (p=0.000 n=9+9) +LogFieldType/Interface(Object)-32 56.6ns ± 8% 52.3ns ± 8% -7.54% (p=0.007 n=10+10) +LogFieldType/Errs-32 17.8ns ± 3% 16.1ns ± 2% -9.71% (p=0.000 n=10+9) +LogFieldType/Time-32 40.5ns ± 1% 12.7ns ± 6% -68.66% (p=0.000 n=8+9) +LogFieldType/Bool-32 12.0ns ± 5% 10.2ns ± 2% -15.18% (p=0.000 n=10+8) +LogFieldType/Bools-32 17.2ns ± 2% 12.6ns ± 4% -26.63% (p=0.000 n=10+10) +LogFieldType/Int-32 12.3ns ± 2% 11.2ns ± 4% -9.27% (p=0.000 n=9+10) +LogFieldType/Float-32 16.7ns ± 1% 12.6ns ± 2% -24.42% (p=0.000 n=7+9) +LogFieldType/Str-32 12.7ns ± 7% 11.3ns ± 7% -10.88% (p=0.000 n=10+9) +LogFieldType/Strs-32 20.3ns ± 3% 18.2ns ± 3% -10.25% (p=0.000 n=9+10) +LogFieldType/Interface-32 183ns ±12% 175ns ± 9% ~ (p=0.078 n=10+10) +``` + +Log message size savings is greatly dependent on the number and type of fields in the log message. +Assuming this log message (with an Integer, timestamp and string, in addition to level). 
+ +`{"level":"error","Fault":41650,"time":"2018-04-01T15:18:19-07:00","message":"Some Message"}` + +Two measurements were done for the log file sizes - one without any compression, second +using [compress/zlib](https://golang.org/pkg/compress/zlib/). + +Results for 10,000 log messages: + +| Log Format | Plain File Size (in KB) | Compressed File Size (in KB) | +| :--- | :---: | :---: | +| JSON | 920 | 28 | +| CBOR | 550 | 28 | + +The example used to calculate the above data is available in [Examples](examples). diff --git a/vendor/github.com/rs/zerolog/internal/cbor/base.go b/vendor/github.com/rs/zerolog/internal/cbor/base.go new file mode 100644 index 00000000..58cd0822 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/base.go @@ -0,0 +1,11 @@ +package cbor + +type Encoder struct{} + +// AppendKey adds a key (string) to the binary encoded log message +func (e Encoder) AppendKey(dst []byte, key string) []byte { + if len(dst) < 1 { + dst = e.AppendBeginMarker(dst) + } + return e.AppendString(dst, key) +} \ No newline at end of file diff --git a/vendor/github.com/rs/zerolog/internal/cbor/cbor.go b/vendor/github.com/rs/zerolog/internal/cbor/cbor.go new file mode 100644 index 00000000..969f5915 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/cbor.go @@ -0,0 +1,100 @@ +// Package cbor provides primitives for storing different data +// in the CBOR (binary) format. CBOR is defined in RFC7049. +package cbor + +import "time" + +const ( + majorOffset = 5 + additionalMax = 23 + + // Non Values. + additionalTypeBoolFalse byte = 20 + additionalTypeBoolTrue byte = 21 + additionalTypeNull byte = 22 + + // Integer (+ve and -ve) Sub-types. + additionalTypeIntUint8 byte = 24 + additionalTypeIntUint16 byte = 25 + additionalTypeIntUint32 byte = 26 + additionalTypeIntUint64 byte = 27 + + // Float Sub-types. 
+ additionalTypeFloat16 byte = 25 + additionalTypeFloat32 byte = 26 + additionalTypeFloat64 byte = 27 + additionalTypeBreak byte = 31 + + // Tag Sub-types. + additionalTypeTimestamp byte = 01 + + // Extended Tags - from https://www.iana.org/assignments/cbor-tags/cbor-tags.xhtml + additionalTypeTagNetworkAddr uint16 = 260 + additionalTypeTagNetworkPrefix uint16 = 261 + additionalTypeEmbeddedJSON uint16 = 262 + additionalTypeTagHexString uint16 = 263 + + // Unspecified number of elements. + additionalTypeInfiniteCount byte = 31 +) +const ( + majorTypeUnsignedInt byte = iota << majorOffset // Major type 0 + majorTypeNegativeInt // Major type 1 + majorTypeByteString // Major type 2 + majorTypeUtf8String // Major type 3 + majorTypeArray // Major type 4 + majorTypeMap // Major type 5 + majorTypeTags // Major type 6 + majorTypeSimpleAndFloat // Major type 7 +) + +const ( + maskOutAdditionalType byte = (7 << majorOffset) + maskOutMajorType byte = 31 +) + +const ( + float32Nan = "\xfa\x7f\xc0\x00\x00" + float32PosInfinity = "\xfa\x7f\x80\x00\x00" + float32NegInfinity = "\xfa\xff\x80\x00\x00" + float64Nan = "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00" + float64PosInfinity = "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00" + float64NegInfinity = "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00" +) + +// IntegerTimeFieldFormat indicates the format of timestamp decoded +// from an integer (time in seconds). +var IntegerTimeFieldFormat = time.RFC3339 + +// NanoTimeFieldFormat indicates the format of timestamp decoded +// from a float value (time in seconds and nano seconds). 
+var NanoTimeFieldFormat = time.RFC3339Nano + +func appendCborTypePrefix(dst []byte, major byte, number uint64) []byte { + byteCount := 8 + var minor byte + switch { + case number < 256: + byteCount = 1 + minor = additionalTypeIntUint8 + + case number < 65536: + byteCount = 2 + minor = additionalTypeIntUint16 + + case number < 4294967296: + byteCount = 4 + minor = additionalTypeIntUint32 + + default: + byteCount = 8 + minor = additionalTypeIntUint64 + + } + dst = append(dst, byte(major|minor)) + byteCount-- + for ; byteCount >= 0; byteCount-- { + dst = append(dst, byte(number>>(uint(byteCount)*8))) + } + return dst +} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go b/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go new file mode 100644 index 00000000..e3cf3b7d --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/decode_stream.go @@ -0,0 +1,614 @@ +package cbor + +// This file contains code to decode a stream of CBOR Data into JSON. + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "net" + "runtime" + "strconv" + "strings" + "time" + "unicode/utf8" +) + +var decodeTimeZone *time.Location + +const hexTable = "0123456789abcdef" + +const isFloat32 = 4 +const isFloat64 = 8 + +func readNBytes(src *bufio.Reader, n int) []byte { + ret := make([]byte, n) + for i := 0; i < n; i++ { + ch, e := src.ReadByte() + if e != nil { + panic(fmt.Errorf("Tried to Read %d Bytes.. But hit end of file", n)) + } + ret[i] = ch + } + return ret +} + +func readByte(src *bufio.Reader) byte { + b, e := src.ReadByte() + if e != nil { + panic(fmt.Errorf("Tried to Read 1 Byte.. 
But hit end of file")) + } + return b +} + +func decodeIntAdditonalType(src *bufio.Reader, minor byte) int64 { + val := int64(0) + if minor <= 23 { + val = int64(minor) + } else { + bytesToRead := 0 + switch minor { + case additionalTypeIntUint8: + bytesToRead = 1 + case additionalTypeIntUint16: + bytesToRead = 2 + case additionalTypeIntUint32: + bytesToRead = 4 + case additionalTypeIntUint64: + bytesToRead = 8 + default: + panic(fmt.Errorf("Invalid Additional Type: %d in decodeInteger (expected <28)", minor)) + } + pb := readNBytes(src, bytesToRead) + for i := 0; i < bytesToRead; i++ { + val = val * 256 + val += int64(pb[i]) + } + } + return val +} + +func decodeInteger(src *bufio.Reader) int64 { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeUnsignedInt && major != majorTypeNegativeInt { + panic(fmt.Errorf("Major type is: %d in decodeInteger!! (expected 0 or 1)", major)) + } + val := decodeIntAdditonalType(src, minor) + if major == 0 { + return val + } + return (-1 - val) +} + +func decodeFloat(src *bufio.Reader) (float64, int) { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeSimpleAndFloat { + panic(fmt.Errorf("Incorrect Major type is: %d in decodeFloat", major)) + } + + switch minor { + case additionalTypeFloat16: + panic(fmt.Errorf("float16 is not suppported in decodeFloat")) + + case additionalTypeFloat32: + pb := readNBytes(src, 4) + switch string(pb) { + case float32Nan: + return math.NaN(), isFloat32 + case float32PosInfinity: + return math.Inf(0), isFloat32 + case float32NegInfinity: + return math.Inf(-1), isFloat32 + } + n := uint32(0) + for i := 0; i < 4; i++ { + n = n * 256 + n += uint32(pb[i]) + } + val := math.Float32frombits(n) + return float64(val), isFloat32 + case additionalTypeFloat64: + pb := readNBytes(src, 8) + switch string(pb) { + case float64Nan: + return math.NaN(), isFloat64 + case float64PosInfinity: 
+ return math.Inf(0), isFloat64 + case float64NegInfinity: + return math.Inf(-1), isFloat64 + } + n := uint64(0) + for i := 0; i < 8; i++ { + n = n * 256 + n += uint64(pb[i]) + } + val := math.Float64frombits(n) + return val, isFloat64 + } + panic(fmt.Errorf("Invalid Additional Type: %d in decodeFloat", minor)) +} + +func decodeStringComplex(dst []byte, s string, pos uint) []byte { + i := int(pos) + start := 0 + + for i < len(s) { + b := s[i] + if b >= utf8.RuneSelf { + r, size := utf8.DecodeRuneInString(s[i:]) + if r == utf8.RuneError && size == 1 { + // In case of error, first append previous simple characters to + // the byte slice if any and append a replacement character code + // in place of the invalid sequence. + if start < i { + dst = append(dst, s[start:i]...) + } + dst = append(dst, `\ufffd`...) + i += size + start = i + continue + } + i += size + continue + } + if b >= 0x20 && b <= 0x7e && b != '\\' && b != '"' { + i++ + continue + } + // We encountered a character that needs to be encoded. + // Let's append the previous simple characters to the byte slice + // and switch our operation to read and encode the remainder + // characters byte-by-byte. + if start < i { + dst = append(dst, s[start:i]...) + } + switch b { + case '"', '\\': + dst = append(dst, '\\', b) + case '\b': + dst = append(dst, '\\', 'b') + case '\f': + dst = append(dst, '\\', 'f') + case '\n': + dst = append(dst, '\\', 'n') + case '\r': + dst = append(dst, '\\', 'r') + case '\t': + dst = append(dst, '\\', 't') + default: + dst = append(dst, '\\', 'u', '0', '0', hexTable[b>>4], hexTable[b&0xF]) + } + i++ + start = i + } + if start < len(s) { + dst = append(dst, s[start:]...) 
+ } + return dst +} + +func decodeString(src *bufio.Reader, noQuotes bool) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeByteString { + panic(fmt.Errorf("Major type is: %d in decodeString", major)) + } + result := []byte{} + if !noQuotes { + result = append(result, '"') + } + length := decodeIntAdditonalType(src, minor) + len := int(length) + pbs := readNBytes(src, len) + result = append(result, pbs...) + if noQuotes { + return result + } + return append(result, '"') +} + +func decodeUTF8String(src *bufio.Reader) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeUtf8String { + panic(fmt.Errorf("Major type is: %d in decodeUTF8String", major)) + } + result := []byte{'"'} + length := decodeIntAdditonalType(src, minor) + len := int(length) + pbs := readNBytes(src, len) + + for i := 0; i < len; i++ { + // Check if the character needs encoding. Control characters, slashes, + // and the double quote need json encoding. Bytes above the ascii + // boundary needs utf8 encoding. + if pbs[i] < 0x20 || pbs[i] > 0x7e || pbs[i] == '\\' || pbs[i] == '"' { + // We encountered a character that needs to be encoded. Switch + // to complex version of the algorithm. + dst := []byte{'"'} + dst = decodeStringComplex(dst, string(pbs), uint(i)) + return append(dst, '"') + } + } + // The string has no need for encoding an therefore is directly + // appended to the byte slice. + result = append(result, pbs...) 
+ return append(result, '"') +} + +func array2Json(src *bufio.Reader, dst io.Writer) { + dst.Write([]byte{'['}) + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeArray { + panic(fmt.Errorf("Major type is: %d in array2Json", major)) + } + len := 0 + unSpecifiedCount := false + if minor == additionalTypeInfiniteCount { + unSpecifiedCount = true + } else { + length := decodeIntAdditonalType(src, minor) + len = int(length) + } + for i := 0; unSpecifiedCount || i < len; i++ { + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) { + readByte(src) + break + } + } + cbor2JsonOneObject(src, dst) + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) { + readByte(src) + break + } + dst.Write([]byte{','}) + } else if i+1 < len { + dst.Write([]byte{','}) + } + } + dst.Write([]byte{']'}) +} + +func map2Json(src *bufio.Reader, dst io.Writer) { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeMap { + panic(fmt.Errorf("Major type is: %d in map2Json", major)) + } + len := 0 + unSpecifiedCount := false + if minor == additionalTypeInfiniteCount { + unSpecifiedCount = true + } else { + length := decodeIntAdditonalType(src, minor) + len = int(length) + } + dst.Write([]byte{'{'}) + for i := 0; unSpecifiedCount || i < len; i++ { + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) { + readByte(src) + break + } + } + cbor2JsonOneObject(src, dst) + if i%2 == 0 { + // Even position values are keys. 
+ dst.Write([]byte{':'}) + } else { + if unSpecifiedCount { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + if pb[0] == byte(majorTypeSimpleAndFloat|additionalTypeBreak) { + readByte(src) + break + } + dst.Write([]byte{','}) + } else if i+1 < len { + dst.Write([]byte{','}) + } + } + } + dst.Write([]byte{'}'}) +} + +func decodeTagData(src *bufio.Reader) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeTags { + panic(fmt.Errorf("Major type is: %d in decodeTagData", major)) + } + switch minor { + case additionalTypeTimestamp: + return decodeTimeStamp(src) + + // Tag value is larger than 256 (so uint16). + case additionalTypeIntUint16: + val := decodeIntAdditonalType(src, minor) + + switch uint16(val) { + case additionalTypeEmbeddedJSON: + pb := readByte(src) + dataMajor := pb & maskOutAdditionalType + if dataMajor != majorTypeByteString { + panic(fmt.Errorf("Unsupported embedded Type: %d in decodeEmbeddedJSON", dataMajor)) + } + src.UnreadByte() + return decodeString(src, true) + + case additionalTypeTagNetworkAddr: + octets := decodeString(src, true) + ss := []byte{'"'} + switch len(octets) { + case 6: // MAC address. + ha := net.HardwareAddr(octets) + ss = append(append(ss, ha.String()...), '"') + case 4: // IPv4 address. + fallthrough + case 16: // IPv6 address. 
+ ip := net.IP(octets) + ss = append(append(ss, ip.String()...), '"') + default: + panic(fmt.Errorf("Unexpected Network Address length: %d (expected 4,6,16)", len(octets))) + } + return ss + + case additionalTypeTagNetworkPrefix: + pb := readByte(src) + if pb != byte(majorTypeMap|0x1) { + panic(fmt.Errorf("IP Prefix is NOT of MAP of 1 elements as expected")) + } + octets := decodeString(src, true) + val := decodeInteger(src) + ip := net.IP(octets) + var mask net.IPMask + pfxLen := int(val) + if len(octets) == 4 { + mask = net.CIDRMask(pfxLen, 32) + } else { + mask = net.CIDRMask(pfxLen, 128) + } + ipPfx := net.IPNet{IP: ip, Mask: mask} + ss := []byte{'"'} + ss = append(append(ss, ipPfx.String()...), '"') + return ss + + case additionalTypeTagHexString: + octets := decodeString(src, true) + ss := []byte{'"'} + for _, v := range octets { + ss = append(ss, hexTable[v>>4], hexTable[v&0x0f]) + } + return append(ss, '"') + + default: + panic(fmt.Errorf("Unsupported Additional Tag Type: %d in decodeTagData", val)) + } + } + panic(fmt.Errorf("Unsupported Additional Type: %d in decodeTagData", minor)) +} + +func decodeTimeStamp(src *bufio.Reader) []byte { + pb := readByte(src) + src.UnreadByte() + tsMajor := pb & maskOutAdditionalType + if tsMajor == majorTypeUnsignedInt || tsMajor == majorTypeNegativeInt { + n := decodeInteger(src) + t := time.Unix(n, 0) + if decodeTimeZone != nil { + t = t.In(decodeTimeZone) + } else { + t = t.In(time.UTC) + } + tsb := []byte{} + tsb = append(tsb, '"') + tsb = t.AppendFormat(tsb, IntegerTimeFieldFormat) + tsb = append(tsb, '"') + return tsb + } else if tsMajor == majorTypeSimpleAndFloat { + n, _ := decodeFloat(src) + secs := int64(n) + n -= float64(secs) + n *= float64(1e9) + t := time.Unix(secs, int64(n)) + if decodeTimeZone != nil { + t = t.In(decodeTimeZone) + } else { + t = t.In(time.UTC) + } + tsb := []byte{} + tsb = append(tsb, '"') + tsb = t.AppendFormat(tsb, NanoTimeFieldFormat) + tsb = append(tsb, '"') + return tsb + } + 
panic(fmt.Errorf("TS format is neigther int nor float: %d", tsMajor)) +} + +func decodeSimpleFloat(src *bufio.Reader) []byte { + pb := readByte(src) + major := pb & maskOutAdditionalType + minor := pb & maskOutMajorType + if major != majorTypeSimpleAndFloat { + panic(fmt.Errorf("Major type is: %d in decodeSimpleFloat", major)) + } + switch minor { + case additionalTypeBoolTrue: + return []byte("true") + case additionalTypeBoolFalse: + return []byte("false") + case additionalTypeNull: + return []byte("null") + case additionalTypeFloat16: + fallthrough + case additionalTypeFloat32: + fallthrough + case additionalTypeFloat64: + src.UnreadByte() + v, bc := decodeFloat(src) + ba := []byte{} + switch { + case math.IsNaN(v): + return []byte("\"NaN\"") + case math.IsInf(v, 1): + return []byte("\"+Inf\"") + case math.IsInf(v, -1): + return []byte("\"-Inf\"") + } + if bc == isFloat32 { + ba = strconv.AppendFloat(ba, v, 'f', -1, 32) + } else if bc == isFloat64 { + ba = strconv.AppendFloat(ba, v, 'f', -1, 64) + } else { + panic(fmt.Errorf("Invalid Float precision from decodeFloat: %d", bc)) + } + return ba + default: + panic(fmt.Errorf("Invalid Additional Type: %d in decodeSimpleFloat", minor)) + } +} + +func cbor2JsonOneObject(src *bufio.Reader, dst io.Writer) { + pb, e := src.Peek(1) + if e != nil { + panic(e) + } + major := (pb[0] & maskOutAdditionalType) + + switch major { + case majorTypeUnsignedInt: + fallthrough + case majorTypeNegativeInt: + n := decodeInteger(src) + dst.Write([]byte(strconv.Itoa(int(n)))) + + case majorTypeByteString: + s := decodeString(src, false) + dst.Write(s) + + case majorTypeUtf8String: + s := decodeUTF8String(src) + dst.Write(s) + + case majorTypeArray: + array2Json(src, dst) + + case majorTypeMap: + map2Json(src, dst) + + case majorTypeTags: + s := decodeTagData(src) + dst.Write(s) + + case majorTypeSimpleAndFloat: + s := decodeSimpleFloat(src) + dst.Write(s) + } +} + +func moreBytesToRead(src *bufio.Reader) bool { + _, e := src.ReadByte() + 
if e == nil { + src.UnreadByte() + return true + } + return false +} + +// Cbor2JsonManyObjects decodes all the CBOR Objects read from src +// reader. It keeps on decoding until reader returns EOF (error when reading). +// Decoded string is written to the dst. At the end of every CBOR Object +// newline is written to the output stream. +// +// Returns error (if any) that was encountered during decode. +// The child functions will generate a panic when error is encountered and +// this function will recover non-runtime Errors and return the reason as error. +func Cbor2JsonManyObjects(src io.Reader, dst io.Writer) (err error) { + defer func() { + if r := recover(); r != nil { + if _, ok := r.(runtime.Error); ok { + panic(r) + } + err = r.(error) + } + }() + bufRdr := bufio.NewReader(src) + for moreBytesToRead(bufRdr) { + cbor2JsonOneObject(bufRdr, dst) + dst.Write([]byte("\n")) + } + return nil +} + +// Detect if the bytes to be printed is Binary or not. +func binaryFmt(p []byte) bool { + if len(p) > 0 && p[0] > 0x7F { + return true + } + return false +} + +func getReader(str string) *bufio.Reader { + return bufio.NewReader(strings.NewReader(str)) +} + +// DecodeIfBinaryToString converts a binary formatted log msg to a +// JSON formatted String Log message - suitable for printing to Console/Syslog. +func DecodeIfBinaryToString(in []byte) string { + if binaryFmt(in) { + var b bytes.Buffer + Cbor2JsonManyObjects(strings.NewReader(string(in)), &b) + return b.String() + } + return string(in) +} + +// DecodeObjectToStr checks if the input is a binary format, if so, +// it will decode a single Object and return the decoded string. +func DecodeObjectToStr(in []byte) string { + if binaryFmt(in) { + var b bytes.Buffer + cbor2JsonOneObject(getReader(string(in)), &b) + return b.String() + } + return string(in) +} + +// DecodeIfBinaryToBytes checks if the input is a binary format, if so, +// it will decode all Objects and return the decoded string as byte array. 
+func DecodeIfBinaryToBytes(in []byte) []byte { + if binaryFmt(in) { + var b bytes.Buffer + Cbor2JsonManyObjects(bytes.NewReader(in), &b) + return b.Bytes() + } + return in +} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/string.go b/vendor/github.com/rs/zerolog/internal/cbor/string.go new file mode 100644 index 00000000..ff42afab --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/string.go @@ -0,0 +1,68 @@ +package cbor + +// AppendStrings encodes and adds an array of strings to the dst byte array. +func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { + major := majorTypeArray + l := len(vals) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendString(dst, v) + } + return dst +} + +// AppendString encodes and adds a string to the dst byte array. +func (Encoder) AppendString(dst []byte, s string) []byte { + major := majorTypeUtf8String + + l := len(s) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, majorTypeUtf8String, uint64(l)) + } + return append(dst, s...) +} + +// AppendBytes encodes and adds an array of bytes to the dst byte array. +func (Encoder) AppendBytes(dst, s []byte) []byte { + major := majorTypeByteString + + l := len(s) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + return append(dst, s...) +} + +// AppendEmbeddedJSON adds a tag and embeds input JSON as such. +func AppendEmbeddedJSON(dst, s []byte) []byte { + major := majorTypeTags + minor := additionalTypeEmbeddedJSON + + // Append the TAG to indicate this is Embedded JSON. + dst = append(dst, byte(major|additionalTypeIntUint16)) + dst = append(dst, byte(minor>>8)) + dst = append(dst, byte(minor&0xff)) + + // Append the JSON Object as Byte String. 
+ major = majorTypeByteString + + l := len(s) + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + return append(dst, s...) +} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/time.go b/vendor/github.com/rs/zerolog/internal/cbor/time.go new file mode 100644 index 00000000..12f6a1dd --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/time.go @@ -0,0 +1,93 @@ +package cbor + +import ( + "time" +) + +func appendIntegerTimestamp(dst []byte, t time.Time) []byte { + major := majorTypeTags + minor := additionalTypeTimestamp + dst = append(dst, byte(major|minor)) + secs := t.Unix() + var val uint64 + if secs < 0 { + major = majorTypeNegativeInt + val = uint64(-secs - 1) + } else { + major = majorTypeUnsignedInt + val = uint64(secs) + } + dst = appendCborTypePrefix(dst, major, uint64(val)) + return dst +} + +func (e Encoder) appendFloatTimestamp(dst []byte, t time.Time) []byte { + major := majorTypeTags + minor := additionalTypeTimestamp + dst = append(dst, byte(major|minor)) + secs := t.Unix() + nanos := t.Nanosecond() + var val float64 + val = float64(secs)*1.0 + float64(nanos)*1E-9 + return e.AppendFloat64(dst, val) +} + +// AppendTime encodes and adds a timestamp to the dst byte array. +func (e Encoder) AppendTime(dst []byte, t time.Time, unused string) []byte { + utc := t.UTC() + if utc.Nanosecond() == 0 { + return appendIntegerTimestamp(dst, utc) + } + return e.appendFloatTimestamp(dst, utc) +} + +// AppendTimes encodes and adds an array of timestamps to the dst byte array. 
+func (e Encoder) AppendTimes(dst []byte, vals []time.Time, unused string) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + + for _, t := range vals { + dst = e.AppendTime(dst, t, unused) + } + return dst +} + +// AppendDuration encodes and adds a duration to the dst byte array. +// useInt field indicates whether to store the duration as seconds (integer) or +// as seconds+nanoseconds (float). +func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { + if useInt { + return e.AppendInt64(dst, int64(d/unit)) + } + return e.AppendFloat64(dst, float64(d)/float64(unit)) +} + +// AppendDurations encodes and adds an array of durations to the dst byte array. +// useInt field indicates whether to store the duration as seconds (integer) or +// as seconds+nanoseconds (float). +func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, d := range vals { + dst = e.AppendDuration(dst, d, unit, useInt) + } + return dst +} diff --git a/vendor/github.com/rs/zerolog/internal/cbor/types.go b/vendor/github.com/rs/zerolog/internal/cbor/types.go new file mode 100644 index 00000000..3d76ea08 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/cbor/types.go @@ -0,0 +1,478 @@ +package cbor + +import ( + "encoding/json" + "fmt" + "math" + "net" +) + +// AppendNil inserts a 'Nil' object into the dst byte array. 
+func (Encoder) AppendNil(dst []byte) []byte { + return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeNull)) +} + +// AppendBeginMarker inserts a map start into the dst byte array. +func (Encoder) AppendBeginMarker(dst []byte) []byte { + return append(dst, byte(majorTypeMap|additionalTypeInfiniteCount)) +} + +// AppendEndMarker inserts a map end into the dst byte array. +func (Encoder) AppendEndMarker(dst []byte) []byte { + return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak)) +} + +// AppendObjectData takes an object in form of a byte array and appends to dst. +func (Encoder) AppendObjectData(dst []byte, o []byte) []byte { + // BeginMarker is present in the dst, which + // should not be copied when appending to existing data. + return append(dst, o[1:]...) +} + +// AppendArrayStart adds markers to indicate the start of an array. +func (Encoder) AppendArrayStart(dst []byte) []byte { + return append(dst, byte(majorTypeArray|additionalTypeInfiniteCount)) +} + +// AppendArrayEnd adds markers to indicate the end of an array. +func (Encoder) AppendArrayEnd(dst []byte) []byte { + return append(dst, byte(majorTypeSimpleAndFloat|additionalTypeBreak)) +} + +// AppendArrayDelim adds markers to indicate end of a particular array element. +func (Encoder) AppendArrayDelim(dst []byte) []byte { + //No delimiters needed in cbor + return dst +} + +// AppendLineBreak is a noop that keep API compat with json encoder. +func (Encoder) AppendLineBreak(dst []byte) []byte { + // No line breaks needed in binary format. + return dst +} + +// AppendBool encodes and inserts a boolean value into the dst byte array. +func (Encoder) AppendBool(dst []byte, val bool) []byte { + b := additionalTypeBoolFalse + if val { + b = additionalTypeBoolTrue + } + return append(dst, byte(majorTypeSimpleAndFloat|b)) +} + +// AppendBools encodes and inserts an array of boolean values into the dst byte array. 
+func (e Encoder) AppendBools(dst []byte, vals []bool) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendBool(dst, v) + } + return dst +} + +// AppendInt encodes and inserts an integer value into the dst byte array. +func (Encoder) AppendInt(dst []byte, val int) []byte { + major := majorTypeUnsignedInt + contentVal := val + if val < 0 { + major = majorTypeNegativeInt + contentVal = -val - 1 + } + if contentVal <= additionalMax { + lb := byte(contentVal) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(contentVal)) + } + return dst +} + +// AppendInts encodes and inserts an array of integer values into the dst byte array. +func (e Encoder) AppendInts(dst []byte, vals []int) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, v) + } + return dst +} + +// AppendInt8 encodes and inserts an int8 value into the dst byte array. +func (e Encoder) AppendInt8(dst []byte, val int8) []byte { + return e.AppendInt(dst, int(val)) +} + +// AppendInts8 encodes and inserts an array of integer values into the dst byte array. 
+func (e Encoder) AppendInts8(dst []byte, vals []int8) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, int(v)) + } + return dst +} + +// AppendInt16 encodes and inserts a int16 value into the dst byte array. +func (e Encoder) AppendInt16(dst []byte, val int16) []byte { + return e.AppendInt(dst, int(val)) +} + +// AppendInts16 encodes and inserts an array of int16 values into the dst byte array. +func (e Encoder) AppendInts16(dst []byte, vals []int16) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, int(v)) + } + return dst +} + +// AppendInt32 encodes and inserts a int32 value into the dst byte array. +func (e Encoder) AppendInt32(dst []byte, val int32) []byte { + return e.AppendInt(dst, int(val)) +} + +// AppendInts32 encodes and inserts an array of int32 values into the dst byte array. +func (e Encoder) AppendInts32(dst []byte, vals []int32) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt(dst, int(v)) + } + return dst +} + +// AppendInt64 encodes and inserts a int64 value into the dst byte array. 
+func (Encoder) AppendInt64(dst []byte, val int64) []byte { + major := majorTypeUnsignedInt + contentVal := val + if val < 0 { + major = majorTypeNegativeInt + contentVal = -val - 1 + } + if contentVal <= additionalMax { + lb := byte(contentVal) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(contentVal)) + } + return dst +} + +// AppendInts64 encodes and inserts an array of int64 values into the dst byte array. +func (e Encoder) AppendInts64(dst []byte, vals []int64) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendInt64(dst, v) + } + return dst +} + +// AppendUint encodes and inserts an unsigned integer value into the dst byte array. +func (e Encoder) AppendUint(dst []byte, val uint) []byte { + return e.AppendInt64(dst, int64(val)) +} + +// AppendUints encodes and inserts an array of unsigned integer values into the dst byte array. +func (e Encoder) AppendUints(dst []byte, vals []uint) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint(dst, v) + } + return dst +} + +// AppendUint8 encodes and inserts a unsigned int8 value into the dst byte array. +func (e Encoder) AppendUint8(dst []byte, val uint8) []byte { + return e.AppendUint(dst, uint(val)) +} + +// AppendUints8 encodes and inserts an array of uint8 values into the dst byte array. 
+func (e Encoder) AppendUints8(dst []byte, vals []uint8) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint8(dst, v) + } + return dst +} + +// AppendUint16 encodes and inserts a uint16 value into the dst byte array. +func (e Encoder) AppendUint16(dst []byte, val uint16) []byte { + return e.AppendUint(dst, uint(val)) +} + +// AppendUints16 encodes and inserts an array of uint16 values into the dst byte array. +func (e Encoder) AppendUints16(dst []byte, vals []uint16) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint16(dst, v) + } + return dst +} + +// AppendUint32 encodes and inserts a uint32 value into the dst byte array. +func (e Encoder) AppendUint32(dst []byte, val uint32) []byte { + return e.AppendUint(dst, uint(val)) +} + +// AppendUints32 encodes and inserts an array of uint32 values into the dst byte array. +func (e Encoder) AppendUints32(dst []byte, vals []uint32) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint32(dst, v) + } + return dst +} + +// AppendUint64 encodes and inserts a uint64 value into the dst byte array. 
+func (Encoder) AppendUint64(dst []byte, val uint64) []byte { + major := majorTypeUnsignedInt + contentVal := val + if contentVal <= additionalMax { + lb := byte(contentVal) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(contentVal)) + } + return dst +} + +// AppendUints64 encodes and inserts an array of uint64 values into the dst byte array. +func (e Encoder) AppendUints64(dst []byte, vals []uint64) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendUint64(dst, v) + } + return dst +} + +// AppendFloat32 encodes and inserts a single precision float value into the dst byte array. +func (Encoder) AppendFloat32(dst []byte, val float32) []byte { + switch { + case math.IsNaN(float64(val)): + return append(dst, "\xfa\x7f\xc0\x00\x00"...) + case math.IsInf(float64(val), 1): + return append(dst, "\xfa\x7f\x80\x00\x00"...) + case math.IsInf(float64(val), -1): + return append(dst, "\xfa\xff\x80\x00\x00"...) + } + major := majorTypeSimpleAndFloat + subType := additionalTypeFloat32 + n := math.Float32bits(val) + var buf [4]byte + for i := uint(0); i < 4; i++ { + buf[i] = byte(n >> ((3 - i) * 8)) + } + return append(append(dst, byte(major|subType)), buf[0], buf[1], buf[2], buf[3]) +} + +// AppendFloats32 encodes and inserts an array of single precision float value into the dst byte array. 
+func (e Encoder) AppendFloats32(dst []byte, vals []float32) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendFloat32(dst, v) + } + return dst +} + +// AppendFloat64 encodes and inserts a double precision float value into the dst byte array. +func (Encoder) AppendFloat64(dst []byte, val float64) []byte { + switch { + case math.IsNaN(val): + return append(dst, "\xfb\x7f\xf8\x00\x00\x00\x00\x00\x00"...) + case math.IsInf(val, 1): + return append(dst, "\xfb\x7f\xf0\x00\x00\x00\x00\x00\x00"...) + case math.IsInf(val, -1): + return append(dst, "\xfb\xff\xf0\x00\x00\x00\x00\x00\x00"...) + } + major := majorTypeSimpleAndFloat + subType := additionalTypeFloat64 + n := math.Float64bits(val) + dst = append(dst, byte(major|subType)) + for i := uint(1); i <= 8; i++ { + b := byte(n >> ((8 - i) * 8)) + dst = append(dst, b) + } + return dst +} + +// AppendFloats64 encodes and inserts an array of double precision float values into the dst byte array. +func (e Encoder) AppendFloats64(dst []byte, vals []float64) []byte { + major := majorTypeArray + l := len(vals) + if l == 0 { + return e.AppendArrayEnd(e.AppendArrayStart(dst)) + } + if l <= additionalMax { + lb := byte(l) + dst = append(dst, byte(major|lb)) + } else { + dst = appendCborTypePrefix(dst, major, uint64(l)) + } + for _, v := range vals { + dst = e.AppendFloat64(dst, v) + } + return dst +} + +// AppendInterface takes an arbitrary object and converts it to JSON and embeds it dst. 
+func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte { + marshaled, err := json.Marshal(i) + if err != nil { + return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err)) + } + return AppendEmbeddedJSON(dst, marshaled) +} + +// AppendIPAddr encodes and inserts an IP Address (IPv4 or IPv6). +func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte { + dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16)) + dst = append(dst, byte(additionalTypeTagNetworkAddr>>8)) + dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff)) + return e.AppendBytes(dst, ip) +} + +// AppendIPPrefix encodes and inserts an IP Address Prefix (Address + Mask Length). +func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte { + dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16)) + dst = append(dst, byte(additionalTypeTagNetworkPrefix>>8)) + dst = append(dst, byte(additionalTypeTagNetworkPrefix&0xff)) + + // Prefix is a tuple (aka MAP of 1 pair of elements) - + // first element is prefix, second is mask length. + dst = append(dst, byte(majorTypeMap|0x1)) + dst = e.AppendBytes(dst, pfx.IP) + maskLen, _ := pfx.Mask.Size() + return e.AppendUint8(dst, uint8(maskLen)) +} + +// AppendMACAddr encodes and inserts an Hardware (MAC) address. +func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte { + dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16)) + dst = append(dst, byte(additionalTypeTagNetworkAddr>>8)) + dst = append(dst, byte(additionalTypeTagNetworkAddr&0xff)) + return e.AppendBytes(dst, ha) +} + +// AppendHex adds a TAG and inserts a hex bytes as a string. 
+func (e Encoder) AppendHex(dst []byte, val []byte) []byte { + dst = append(dst, byte(majorTypeTags|additionalTypeIntUint16)) + dst = append(dst, byte(additionalTypeTagHexString>>8)) + dst = append(dst, byte(additionalTypeTagHexString&0xff)) + return e.AppendBytes(dst, val) +} diff --git a/vendor/github.com/rs/zerolog/internal/json/base.go b/vendor/github.com/rs/zerolog/internal/json/base.go new file mode 100644 index 00000000..62248e71 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/json/base.go @@ -0,0 +1,11 @@ +package json + +type Encoder struct{} + +// AppendKey appends a new key to the output JSON. +func (e Encoder) AppendKey(dst []byte, key string) []byte { + if dst[len(dst)-1] != '{' { + dst = append(dst, ',') + } + return append(e.AppendString(dst, key), ':') +} diff --git a/vendor/github.com/rs/zerolog/internal/json/bytes.go b/vendor/github.com/rs/zerolog/internal/json/bytes.go new file mode 100644 index 00000000..de64120d --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/json/bytes.go @@ -0,0 +1,85 @@ +package json + +import "unicode/utf8" + +// AppendBytes is a mirror of appendString with []byte arg +func (Encoder) AppendBytes(dst, s []byte) []byte { + dst = append(dst, '"') + for i := 0; i < len(s); i++ { + if !noEscapeTable[s[i]] { + dst = appendBytesComplex(dst, s, i) + return append(dst, '"') + } + } + dst = append(dst, s...) + return append(dst, '"') +} + +// AppendHex encodes the input bytes to a hex string and appends +// the encoded string to the input byte slice. +// +// The operation loops though each byte and encodes it as hex using +// the hex lookup table. 
+func (Encoder) AppendHex(dst, s []byte) []byte { + dst = append(dst, '"') + for _, v := range s { + dst = append(dst, hex[v>>4], hex[v&0x0f]) + } + return append(dst, '"') +} + +// appendBytesComplex is a mirror of the appendStringComplex +// with []byte arg +func appendBytesComplex(dst, s []byte, i int) []byte { + start := 0 + for i < len(s) { + b := s[i] + if b >= utf8.RuneSelf { + r, size := utf8.DecodeRune(s[i:]) + if r == utf8.RuneError && size == 1 { + if start < i { + dst = append(dst, s[start:i]...) + } + dst = append(dst, `\ufffd`...) + i += size + start = i + continue + } + i += size + continue + } + if noEscapeTable[b] { + i++ + continue + } + // We encountered a character that needs to be encoded. + // Let's append the previous simple characters to the byte slice + // and switch our operation to read and encode the remainder + // characters byte-by-byte. + if start < i { + dst = append(dst, s[start:i]...) + } + switch b { + case '"', '\\': + dst = append(dst, '\\', b) + case '\b': + dst = append(dst, '\\', 'b') + case '\f': + dst = append(dst, '\\', 'f') + case '\n': + dst = append(dst, '\\', 'n') + case '\r': + dst = append(dst, '\\', 'r') + case '\t': + dst = append(dst, '\\', 't') + default: + dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) + } + i++ + start = i + } + if start < len(s) { + dst = append(dst, s[start:]...) + } + return dst +} diff --git a/vendor/github.com/rs/zerolog/internal/json/string.go b/vendor/github.com/rs/zerolog/internal/json/string.go new file mode 100644 index 00000000..815906ff --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/json/string.go @@ -0,0 +1,121 @@ +package json + +import "unicode/utf8" + +const hex = "0123456789abcdef" + +var noEscapeTable = [256]bool{} + +func init() { + for i := 0; i <= 0x7e; i++ { + noEscapeTable[i] = i >= 0x20 && i != '\\' && i != '"' + } +} + +// AppendStrings encodes the input strings to json and +// appends the encoded string list to the input byte slice. 
+func (e Encoder) AppendStrings(dst []byte, vals []string) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = e.AppendString(dst, vals[0]) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = e.AppendString(append(dst, ','), val) + } + } + dst = append(dst, ']') + return dst +} + +// AppendString encodes the input string to json and appends +// the encoded string to the input byte slice. +// +// The operation loops though each byte in the string looking +// for characters that need json or utf8 encoding. If the string +// does not need encoding, then the string is appended in it's +// entirety to the byte slice. +// If we encounter a byte that does need encoding, switch up +// the operation and perform a byte-by-byte read-encode-append. +func (Encoder) AppendString(dst []byte, s string) []byte { + // Start with a double quote. + dst = append(dst, '"') + // Loop through each character in the string. + for i := 0; i < len(s); i++ { + // Check if the character needs encoding. Control characters, slashes, + // and the double quote need json encoding. Bytes above the ascii + // boundary needs utf8 encoding. + if !noEscapeTable[s[i]] { + // We encountered a character that needs to be encoded. Switch + // to complex version of the algorithm. + dst = appendStringComplex(dst, s, i) + return append(dst, '"') + } + } + // The string has no need for encoding an therefore is directly + // appended to the byte slice. + dst = append(dst, s...) + // End with a double quote + return append(dst, '"') +} + +// appendStringComplex is used by appendString to take over an in +// progress JSON string encoding that encountered a character that needs +// to be encoded. 
+func appendStringComplex(dst []byte, s string, i int) []byte { + start := 0 + for i < len(s) { + b := s[i] + if b >= utf8.RuneSelf { + r, size := utf8.DecodeRuneInString(s[i:]) + if r == utf8.RuneError && size == 1 { + // In case of error, first append previous simple characters to + // the byte slice if any and append a remplacement character code + // in place of the invalid sequence. + if start < i { + dst = append(dst, s[start:i]...) + } + dst = append(dst, `\ufffd`...) + i += size + start = i + continue + } + i += size + continue + } + if noEscapeTable[b] { + i++ + continue + } + // We encountered a character that needs to be encoded. + // Let's append the previous simple characters to the byte slice + // and switch our operation to read and encode the remainder + // characters byte-by-byte. + if start < i { + dst = append(dst, s[start:i]...) + } + switch b { + case '"', '\\': + dst = append(dst, '\\', b) + case '\b': + dst = append(dst, '\\', 'b') + case '\f': + dst = append(dst, '\\', 'f') + case '\n': + dst = append(dst, '\\', 'n') + case '\r': + dst = append(dst, '\\', 'r') + case '\t': + dst = append(dst, '\\', 't') + default: + dst = append(dst, '\\', 'u', '0', '0', hex[b>>4], hex[b&0xF]) + } + i++ + start = i + } + if start < len(s) { + dst = append(dst, s[start:]...) + } + return dst +} diff --git a/vendor/github.com/rs/zerolog/internal/json/time.go b/vendor/github.com/rs/zerolog/internal/json/time.go new file mode 100644 index 00000000..5aff6be3 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/json/time.go @@ -0,0 +1,106 @@ +package json + +import ( + "strconv" + "time" +) + +const ( + // Import from zerolog/global.go + timeFormatUnix = "" + timeFormatUnixMs = "UNIXMS" + timeFormatUnixMicro = "UNIXMICRO" +) + +// AppendTime formats the input time with the given format +// and appends the encoded string to the input byte slice. 
+func (e Encoder) AppendTime(dst []byte, t time.Time, format string) []byte { + switch format { + case timeFormatUnix: + return e.AppendInt64(dst, t.Unix()) + case timeFormatUnixMs: + return e.AppendInt64(dst, t.UnixNano()/1000000) + case timeFormatUnixMicro: + return e.AppendInt64(dst, t.UnixNano()/1000) + } + return append(t.AppendFormat(append(dst, '"'), format), '"') +} + +// AppendTimes converts the input times with the given format +// and appends the encoded string list to the input byte slice. +func (Encoder) AppendTimes(dst []byte, vals []time.Time, format string) []byte { + switch format { + case timeFormatUnix: + return appendUnixTimes(dst, vals) + case timeFormatUnixMs: + return appendUnixMsTimes(dst, vals) + } + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = append(vals[0].AppendFormat(append(dst, '"'), format), '"') + if len(vals) > 1 { + for _, t := range vals[1:] { + dst = append(t.AppendFormat(append(dst, ',', '"'), format), '"') + } + } + dst = append(dst, ']') + return dst +} + +func appendUnixTimes(dst []byte, vals []time.Time) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, vals[0].Unix(), 10) + if len(vals) > 1 { + for _, t := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), t.Unix(), 10) + } + } + dst = append(dst, ']') + return dst +} + +func appendUnixMsTimes(dst []byte, vals []time.Time) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, vals[0].UnixNano()/1000000, 10) + if len(vals) > 1 { + for _, t := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), t.UnixNano()/1000000, 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendDuration formats the input duration with the given unit & format +// and appends the encoded string to the input byte slice. 
+func (e Encoder) AppendDuration(dst []byte, d time.Duration, unit time.Duration, useInt bool) []byte { + if useInt { + return strconv.AppendInt(dst, int64(d/unit), 10) + } + return e.AppendFloat64(dst, float64(d)/float64(unit)) +} + +// AppendDurations formats the input durations with the given unit & format +// and appends the encoded string list to the input byte slice. +func (e Encoder) AppendDurations(dst []byte, vals []time.Duration, unit time.Duration, useInt bool) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = e.AppendDuration(dst, vals[0], unit, useInt) + if len(vals) > 1 { + for _, d := range vals[1:] { + dst = e.AppendDuration(append(dst, ','), d, unit, useInt) + } + } + dst = append(dst, ']') + return dst +} diff --git a/vendor/github.com/rs/zerolog/internal/json/types.go b/vendor/github.com/rs/zerolog/internal/json/types.go new file mode 100644 index 00000000..d1862426 --- /dev/null +++ b/vendor/github.com/rs/zerolog/internal/json/types.go @@ -0,0 +1,406 @@ +package json + +import ( + "encoding/json" + "fmt" + "math" + "net" + "strconv" +) + +// AppendNil inserts a 'Nil' object into the dst byte array. +func (Encoder) AppendNil(dst []byte) []byte { + return append(dst, "null"...) +} + +// AppendBeginMarker inserts a map start into the dst byte array. +func (Encoder) AppendBeginMarker(dst []byte) []byte { + return append(dst, '{') +} + +// AppendEndMarker inserts a map end into the dst byte array. +func (Encoder) AppendEndMarker(dst []byte) []byte { + return append(dst, '}') +} + +// AppendLineBreak appends a line break. +func (Encoder) AppendLineBreak(dst []byte) []byte { + return append(dst, '\n') +} + +// AppendArrayStart adds markers to indicate the start of an array. +func (Encoder) AppendArrayStart(dst []byte) []byte { + return append(dst, '[') +} + +// AppendArrayEnd adds markers to indicate the end of an array. 
+func (Encoder) AppendArrayEnd(dst []byte) []byte { + return append(dst, ']') +} + +// AppendArrayDelim adds markers to indicate end of a particular array element. +func (Encoder) AppendArrayDelim(dst []byte) []byte { + if len(dst) > 0 { + return append(dst, ',') + } + return dst +} + +// AppendBool converts the input bool to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendBool(dst []byte, val bool) []byte { + return strconv.AppendBool(dst, val) +} + +// AppendBools encodes the input bools to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendBools(dst []byte, vals []bool) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendBool(dst, vals[0]) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendBool(append(dst, ','), val) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt converts the input int to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt(dst []byte, val int) []byte { + return strconv.AppendInt(dst, int64(val), 10) +} + +// AppendInts encodes the input ints to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts(dst []byte, vals []int) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, int64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), int64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt8 converts the input []int8 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt8(dst []byte, val int8) []byte { + return strconv.AppendInt(dst, int64(val), 10) +} + +// AppendInts8 encodes the input int8s to json and +// appends the encoded string list to the input byte slice. 
+func (Encoder) AppendInts8(dst []byte, vals []int8) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, int64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), int64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt16 converts the input int16 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt16(dst []byte, val int16) []byte { + return strconv.AppendInt(dst, int64(val), 10) +} + +// AppendInts16 encodes the input int16s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts16(dst []byte, vals []int16) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, int64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), int64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt32 converts the input int32 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendInt32(dst []byte, val int32) []byte { + return strconv.AppendInt(dst, int64(val), 10) +} + +// AppendInts32 encodes the input int32s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts32(dst []byte, vals []int32) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, int64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), int64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendInt64 converts the input int64 to a string and +// appends the encoded string to the input byte slice. 
+func (Encoder) AppendInt64(dst []byte, val int64) []byte { + return strconv.AppendInt(dst, val, 10) +} + +// AppendInts64 encodes the input int64s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendInts64(dst []byte, vals []int64) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendInt(dst, vals[0], 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendInt(append(dst, ','), val, 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint converts the input uint to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint(dst []byte, val uint) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints encodes the input uints to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints(dst []byte, vals []uint) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, uint64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint8 converts the input uint8 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint8(dst []byte, val uint8) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints8 encodes the input uint8s to json and +// appends the encoded string list to the input byte slice. 
+func (Encoder) AppendUints8(dst []byte, vals []uint8) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, uint64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint16 converts the input uint16 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint16(dst []byte, val uint16) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints16 encodes the input uint16s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints16(dst []byte, vals []uint16) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, uint64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint32 converts the input uint32 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendUint32(dst []byte, val uint32) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints32 encodes the input uint32s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints32(dst []byte, vals []uint32) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, uint64(vals[0]), 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), uint64(val), 10) + } + } + dst = append(dst, ']') + return dst +} + +// AppendUint64 converts the input uint64 to a string and +// appends the encoded string to the input byte slice. 
+func (Encoder) AppendUint64(dst []byte, val uint64) []byte { + return strconv.AppendUint(dst, uint64(val), 10) +} + +// AppendUints64 encodes the input uint64s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendUints64(dst []byte, vals []uint64) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = strconv.AppendUint(dst, vals[0], 10) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = strconv.AppendUint(append(dst, ','), val, 10) + } + } + dst = append(dst, ']') + return dst +} + +func appendFloat(dst []byte, val float64, bitSize int) []byte { + // JSON does not permit NaN or Infinity. A typical JSON encoder would fail + // with an error, but a logging library wants the data to get thru so we + // make a tradeoff and store those types as string. + switch { + case math.IsNaN(val): + return append(dst, `"NaN"`...) + case math.IsInf(val, 1): + return append(dst, `"+Inf"`...) + case math.IsInf(val, -1): + return append(dst, `"-Inf"`...) + } + return strconv.AppendFloat(dst, val, 'f', -1, bitSize) +} + +// AppendFloat32 converts the input float32 to a string and +// appends the encoded string to the input byte slice. +func (Encoder) AppendFloat32(dst []byte, val float32) []byte { + return appendFloat(dst, float64(val), 32) +} + +// AppendFloats32 encodes the input float32s to json and +// appends the encoded string list to the input byte slice. +func (Encoder) AppendFloats32(dst []byte, vals []float32) []byte { + if len(vals) == 0 { + return append(dst, '[', ']') + } + dst = append(dst, '[') + dst = appendFloat(dst, float64(vals[0]), 32) + if len(vals) > 1 { + for _, val := range vals[1:] { + dst = appendFloat(append(dst, ','), float64(val), 32) + } + } + dst = append(dst, ']') + return dst +} + +// AppendFloat64 converts the input float64 to a string and +// appends the encoded string to the input byte slice. 
+func (Encoder) AppendFloat64(dst []byte, val float64) []byte {
+	return appendFloat(dst, val, 64)
+}
+
+// AppendFloats64 encodes the input float64s to json and
+// appends the encoded string list to the input byte slice.
+func (Encoder) AppendFloats64(dst []byte, vals []float64) []byte {
+	if len(vals) == 0 {
+		return append(dst, '[', ']')
+	}
+	dst = append(dst, '[')
+	// bitSize must be 64 here: encoding the first element with bitSize 32
+	// would round a float64 to float32 precision (subsequent elements
+	// below already correctly use 64).
+	dst = appendFloat(dst, vals[0], 64)
+	if len(vals) > 1 {
+		for _, val := range vals[1:] {
+			dst = appendFloat(append(dst, ','), val, 64)
+		}
+	}
+	dst = append(dst, ']')
+	return dst
+}
+
+// AppendInterface marshals the input interface to a string and
+// appends the encoded string to the input byte slice.
+func (e Encoder) AppendInterface(dst []byte, i interface{}) []byte {
+	marshaled, err := json.Marshal(i)
+	if err != nil {
+		return e.AppendString(dst, fmt.Sprintf("marshaling error: %v", err))
+	}
+	return append(dst, marshaled...)
+}
+
+// AppendObjectData takes in an object that is already in a byte array
+// and adds it to the dst.
+func (Encoder) AppendObjectData(dst []byte, o []byte) []byte {
+	// Three conditions apply here:
+	// 1. new content starts with '{' - which should be dropped OR
+	// 2. new content starts with '{' - which should be replaced with ','
+	//    to separate with existing content OR
+	// 3. existing content has already other fields
+	if o[0] == '{' {
+		if len(dst) > 1 {
+			dst = append(dst, ',')
+		}
+		o = o[1:]
+	} else if len(dst) > 1 {
+		dst = append(dst, ',')
+	}
+	return append(dst, o...)
+}
+
+// AppendIPAddr adds IPv4 or IPv6 address to dst.
+func (e Encoder) AppendIPAddr(dst []byte, ip net.IP) []byte {
+	return e.AppendString(dst, ip.String())
+}
+
+// AppendIPPrefix adds IPv4 or IPv6 Prefix (address & mask) to dst.
+func (e Encoder) AppendIPPrefix(dst []byte, pfx net.IPNet) []byte {
+	return e.AppendString(dst, pfx.String())
+
+}
+
+// AppendMACAddr adds MAC address to dst.
+func (e Encoder) AppendMACAddr(dst []byte, ha net.HardwareAddr) []byte { + return e.AppendString(dst, ha.String()) +} diff --git a/vendor/github.com/rs/zerolog/log.go b/vendor/github.com/rs/zerolog/log.go new file mode 100644 index 00000000..d02b15e3 --- /dev/null +++ b/vendor/github.com/rs/zerolog/log.go @@ -0,0 +1,440 @@ +// Package zerolog provides a lightweight logging library dedicated to JSON logging. +// +// A global Logger can be use for simple logging: +// +// import "github.com/rs/zerolog/log" +// +// log.Info().Msg("hello world") +// // Output: {"time":1494567715,"level":"info","message":"hello world"} +// +// NOTE: To import the global logger, import the "log" subpackage "github.com/rs/zerolog/log". +// +// Fields can be added to log messages: +// +// log.Info().Str("foo", "bar").Msg("hello world") +// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"} +// +// Create logger instance to manage different outputs: +// +// logger := zerolog.New(os.Stderr).With().Timestamp().Logger() +// logger.Info(). +// Str("foo", "bar"). +// Msg("hello world") +// // Output: {"time":1494567715,"level":"info","message":"hello world","foo":"bar"} +// +// Sub-loggers let you chain loggers with additional context: +// +// sublogger := log.With().Str("component": "foo").Logger() +// sublogger.Info().Msg("hello world") +// // Output: {"time":1494567715,"level":"info","message":"hello world","component":"foo"} +// +// Level logging +// +// zerolog.SetGlobalLevel(zerolog.InfoLevel) +// +// log.Debug().Msg("filtered out message") +// log.Info().Msg("routed message") +// +// if e := log.Debug(); e.Enabled() { +// // Compute log output only if enabled. 
+//         value := compute()
+//         e.Str("foo", value).Msg("some debug message")
+//     }
+//     // Output: {"level":"info","time":1494567715,"message":"routed message"}
+//
+// Customize automatic field names:
+//
+//     log.TimestampFieldName = "t"
+//     log.LevelFieldName = "p"
+//     log.MessageFieldName = "m"
+//
+//     log.Info().Msg("hello world")
+//     // Output: {"t":1494567715,"p":"info","m":"hello world"}
+//
+// Log with no level and message:
+//
+//     log.Log().Str("foo","bar").Msg("")
+//     // Output: {"time":1494567715,"foo":"bar"}
+//
+// Add contextual fields to global Logger:
+//
+//     log.Logger = log.With().Str("foo", "bar").Logger()
+//
+// Sample logs:
+//
+//     sampled := log.Sample(&zerolog.BasicSampler{N: 10})
+//     sampled.Info().Msg("will be logged every 10 messages")
+//
+// Log with contextual hooks:
+//
+//     // Create the hook:
+//     type SeverityHook struct{}
+//
+//     func (h SeverityHook) Run(e *zerolog.Event, level zerolog.Level, msg string) {
+//          if level != zerolog.NoLevel {
+//              e.Str("severity", level.String())
+//          }
+//     }
+//
+//     // And use it:
+//     var h SeverityHook
+//     log := zerolog.New(os.Stdout).Hook(h)
+//     log.Warn().Msg("")
+//     // Output: {"level":"warn","severity":"warn"}
+//
+//
+// Caveats
+//
+// There is no fields deduplication out-of-the-box.
+// Using the same key multiple times creates new key in final JSON each time.
+//
+//     logger := zerolog.New(os.Stderr).With().Timestamp().Logger()
+//     logger.Info().
+//            Timestamp().
+//            Msg("dup")
+//     // Output: {"level":"info","time":1494567715,"time":1494567715,"message":"dup"}
+//
+// In this case, many consumers will take the last value,
+// but this is not guaranteed; check yours if in doubt.
+package zerolog
+
+import (
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"strconv"
+)
+
+// Level defines log levels.
+type Level int8
+
+const (
+	// DebugLevel defines debug log level.
+	DebugLevel Level = iota
+	// InfoLevel defines info log level.
+	InfoLevel
+	// WarnLevel defines warn log level.
+ WarnLevel + // ErrorLevel defines error log level. + ErrorLevel + // FatalLevel defines fatal log level. + FatalLevel + // PanicLevel defines panic log level. + PanicLevel + // NoLevel defines an absent log level. + NoLevel + // Disabled disables the logger. + Disabled + + // TraceLevel defines trace log level. + TraceLevel Level = -1 +) + +func (l Level) String() string { + switch l { + case TraceLevel: + return "trace" + case DebugLevel: + return "debug" + case InfoLevel: + return "info" + case WarnLevel: + return "warn" + case ErrorLevel: + return "error" + case FatalLevel: + return "fatal" + case PanicLevel: + return "panic" + case NoLevel: + return "" + } + return "" +} + +// ParseLevel converts a level string into a zerolog Level value. +// returns an error if the input string does not match known values. +func ParseLevel(levelStr string) (Level, error) { + switch levelStr { + case LevelFieldMarshalFunc(TraceLevel): + return TraceLevel, nil + case LevelFieldMarshalFunc(DebugLevel): + return DebugLevel, nil + case LevelFieldMarshalFunc(InfoLevel): + return InfoLevel, nil + case LevelFieldMarshalFunc(WarnLevel): + return WarnLevel, nil + case LevelFieldMarshalFunc(ErrorLevel): + return ErrorLevel, nil + case LevelFieldMarshalFunc(FatalLevel): + return FatalLevel, nil + case LevelFieldMarshalFunc(PanicLevel): + return PanicLevel, nil + case LevelFieldMarshalFunc(NoLevel): + return NoLevel, nil + } + return NoLevel, fmt.Errorf("Unknown Level String: '%s', defaulting to NoLevel", levelStr) +} + +// A Logger represents an active logging object that generates lines +// of JSON output to an io.Writer. Each logging operation makes a single +// call to the Writer's Write method. There is no guarantee on access +// serialization to the Writer. If your Writer is not thread safe, +// you may consider a sync wrapper. 
+type Logger struct { + w LevelWriter + level Level + sampler Sampler + context []byte + hooks []Hook +} + +// New creates a root logger with given output writer. If the output writer implements +// the LevelWriter interface, the WriteLevel method will be called instead of the Write +// one. +// +// Each logging operation makes a single call to the Writer's Write method. There is no +// guarantee on access serialization to the Writer. If your Writer is not thread safe, +// you may consider using sync wrapper. +func New(w io.Writer) Logger { + if w == nil { + w = ioutil.Discard + } + lw, ok := w.(LevelWriter) + if !ok { + lw = levelWriterAdapter{w} + } + return Logger{w: lw, level: TraceLevel} +} + +// Nop returns a disabled logger for which all operation are no-op. +func Nop() Logger { + return New(nil).Level(Disabled) +} + +// Output duplicates the current logger and sets w as its output. +func (l Logger) Output(w io.Writer) Logger { + l2 := New(w) + l2.level = l.level + l2.sampler = l.sampler + if len(l.hooks) > 0 { + l2.hooks = append(l2.hooks, l.hooks...) + } + if l.context != nil { + l2.context = make([]byte, len(l.context), cap(l.context)) + copy(l2.context, l.context) + } + return l2 +} + +// With creates a child logger with the field added to its context. +func (l Logger) With() Context { + context := l.context + l.context = make([]byte, 0, 500) + if context != nil { + l.context = append(l.context, context...) + } else { + // This is needed for AppendKey to not check len of input + // thus making it inlinable + l.context = enc.AppendBeginMarker(l.context) + } + return Context{l} +} + +// UpdateContext updates the internal logger's context. +// +// Use this method with caution. If unsure, prefer the With method. 
+func (l *Logger) UpdateContext(update func(c Context) Context) { + if l == disabledLogger { + return + } + if cap(l.context) == 0 { + l.context = make([]byte, 0, 500) + } + if len(l.context) == 0 { + l.context = enc.AppendBeginMarker(l.context) + } + c := update(Context{*l}) + l.context = c.l.context +} + +// Level creates a child logger with the minimum accepted level set to level. +func (l Logger) Level(lvl Level) Logger { + l.level = lvl + return l +} + +// GetLevel returns the current Level of l. +func (l Logger) GetLevel() Level { + return l.level +} + +// Sample returns a logger with the s sampler. +func (l Logger) Sample(s Sampler) Logger { + l.sampler = s + return l +} + +// Hook returns a logger with the h Hook. +func (l Logger) Hook(h Hook) Logger { + l.hooks = append(l.hooks, h) + return l +} + +// Trace starts a new message with trace level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Trace() *Event { + return l.newEvent(TraceLevel, nil) +} + +// Debug starts a new message with debug level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Debug() *Event { + return l.newEvent(DebugLevel, nil) +} + +// Info starts a new message with info level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Info() *Event { + return l.newEvent(InfoLevel, nil) +} + +// Warn starts a new message with warn level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Warn() *Event { + return l.newEvent(WarnLevel, nil) +} + +// Error starts a new message with error level. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Error() *Event { + return l.newEvent(ErrorLevel, nil) +} + +// Err starts a new message with error level with err as a field if not nil or +// with info level if err is nil. 
+// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Err(err error) *Event { + if err != nil { + return l.Error().Err(err) + } + + return l.Info() +} + +// Fatal starts a new message with fatal level. The os.Exit(1) function +// is called by the Msg method, which terminates the program immediately. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Fatal() *Event { + return l.newEvent(FatalLevel, func(msg string) { os.Exit(1) }) +} + +// Panic starts a new message with panic level. The panic() function +// is called by the Msg method, which stops the ordinary flow of a goroutine. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) Panic() *Event { + return l.newEvent(PanicLevel, func(msg string) { panic(msg) }) +} + +// WithLevel starts a new message with level. Unlike Fatal and Panic +// methods, WithLevel does not terminate the program or stop the ordinary +// flow of a gourotine when used with their respective levels. +// +// You must call Msg on the returned event in order to send the event. +func (l *Logger) WithLevel(level Level) *Event { + switch level { + case TraceLevel: + return l.Trace() + case DebugLevel: + return l.Debug() + case InfoLevel: + return l.Info() + case WarnLevel: + return l.Warn() + case ErrorLevel: + return l.Error() + case FatalLevel: + return l.newEvent(FatalLevel, nil) + case PanicLevel: + return l.newEvent(PanicLevel, nil) + case NoLevel: + return l.Log() + case Disabled: + return nil + default: + panic("zerolog: WithLevel(): invalid level: " + strconv.Itoa(int(level))) + } +} + +// Log starts a new message with no level. Setting GlobalLevel to Disabled +// will still disable events produced by this method. +// +// You must call Msg on the returned event in order to send the event. 
+func (l *Logger) Log() *Event { + return l.newEvent(NoLevel, nil) +} + +// Print sends a log event using debug level and no extra field. +// Arguments are handled in the manner of fmt.Print. +func (l *Logger) Print(v ...interface{}) { + if e := l.Debug(); e.Enabled() { + e.Msg(fmt.Sprint(v...)) + } +} + +// Printf sends a log event using debug level and no extra field. +// Arguments are handled in the manner of fmt.Printf. +func (l *Logger) Printf(format string, v ...interface{}) { + if e := l.Debug(); e.Enabled() { + e.Msg(fmt.Sprintf(format, v...)) + } +} + +// Write implements the io.Writer interface. This is useful to set as a writer +// for the standard library log. +func (l Logger) Write(p []byte) (n int, err error) { + n = len(p) + if n > 0 && p[n-1] == '\n' { + // Trim CR added by stdlog. + p = p[0 : n-1] + } + l.Log().Msg(string(p)) + return +} + +func (l *Logger) newEvent(level Level, done func(string)) *Event { + enabled := l.should(level) + if !enabled { + return nil + } + e := newEvent(l.w, level) + e.done = done + e.ch = l.hooks + if level != NoLevel { + e.Str(LevelFieldName, LevelFieldMarshalFunc(level)) + } + if l.context != nil && len(l.context) > 1 { + e.buf = enc.AppendObjectData(e.buf, l.context) + } + return e +} + +// should returns true if the log event should be logged. +func (l *Logger) should(lvl Level) bool { + if lvl < l.level || lvl < GlobalLevel() { + return false + } + if l.sampler != nil && !samplingDisabled() { + return l.sampler.Sample(lvl) + } + return true +} diff --git a/vendor/github.com/rs/zerolog/log/log.go b/vendor/github.com/rs/zerolog/log/log.go new file mode 100644 index 00000000..b96f1c14 --- /dev/null +++ b/vendor/github.com/rs/zerolog/log/log.go @@ -0,0 +1,130 @@ +// Package log provides a global logger for zerolog. +package log + +import ( + "context" + "io" + "os" + + "github.com/rs/zerolog" +) + +// Logger is the global logger. 
+var Logger = zerolog.New(os.Stderr).With().Timestamp().Logger() + +// Output duplicates the global logger and sets w as its output. +func Output(w io.Writer) zerolog.Logger { + return Logger.Output(w) +} + +// With creates a child logger with the field added to its context. +func With() zerolog.Context { + return Logger.With() +} + +// Level creates a child logger with the minimum accepted level set to level. +func Level(level zerolog.Level) zerolog.Logger { + return Logger.Level(level) +} + +// Sample returns a logger with the s sampler. +func Sample(s zerolog.Sampler) zerolog.Logger { + return Logger.Sample(s) +} + +// Hook returns a logger with the h Hook. +func Hook(h zerolog.Hook) zerolog.Logger { + return Logger.Hook(h) +} + +// Err starts a new message with error level with err as a field if not nil or +// with info level if err is nil. +// +// You must call Msg on the returned event in order to send the event. +func Err(err error) *zerolog.Event { + return Logger.Err(err) +} + +// Trace starts a new message with trace level. +// +// You must call Msg on the returned event in order to send the event. +func Trace() *zerolog.Event { + return Logger.Trace() +} + +// Debug starts a new message with debug level. +// +// You must call Msg on the returned event in order to send the event. +func Debug() *zerolog.Event { + return Logger.Debug() +} + +// Info starts a new message with info level. +// +// You must call Msg on the returned event in order to send the event. +func Info() *zerolog.Event { + return Logger.Info() +} + +// Warn starts a new message with warn level. +// +// You must call Msg on the returned event in order to send the event. +func Warn() *zerolog.Event { + return Logger.Warn() +} + +// Error starts a new message with error level. +// +// You must call Msg on the returned event in order to send the event. +func Error() *zerolog.Event { + return Logger.Error() +} + +// Fatal starts a new message with fatal level. 
The os.Exit(1) function +// is called by the Msg method. +// +// You must call Msg on the returned event in order to send the event. +func Fatal() *zerolog.Event { + return Logger.Fatal() +} + +// Panic starts a new message with panic level. The message is also sent +// to the panic function. +// +// You must call Msg on the returned event in order to send the event. +func Panic() *zerolog.Event { + return Logger.Panic() +} + +// WithLevel starts a new message with level. +// +// You must call Msg on the returned event in order to send the event. +func WithLevel(level zerolog.Level) *zerolog.Event { + return Logger.WithLevel(level) +} + +// Log starts a new message with no level. Setting zerolog.GlobalLevel to +// zerolog.Disabled will still disable events produced by this method. +// +// You must call Msg on the returned event in order to send the event. +func Log() *zerolog.Event { + return Logger.Log() +} + +// Print sends a log event using debug level and no extra field. +// Arguments are handled in the manner of fmt.Print. +func Print(v ...interface{}) { + Logger.Print(v...) +} + +// Printf sends a log event using debug level and no extra field. +// Arguments are handled in the manner of fmt.Printf. +func Printf(format string, v ...interface{}) { + Logger.Printf(format, v...) +} + +// Ctx returns the Logger associated with the ctx. If no logger +// is associated, a disabled logger is returned. 
+func Ctx(ctx context.Context) *zerolog.Logger { + return zerolog.Ctx(ctx) +} diff --git a/vendor/github.com/rs/zerolog/not_go112.go b/vendor/github.com/rs/zerolog/not_go112.go new file mode 100644 index 00000000..4c43c9e7 --- /dev/null +++ b/vendor/github.com/rs/zerolog/not_go112.go @@ -0,0 +1,5 @@ +// +build !go1.12 + +package zerolog + +const contextCallerSkipFrameCount = 3 diff --git a/vendor/github.com/rs/zerolog/pretty.png b/vendor/github.com/rs/zerolog/pretty.png new file mode 100644 index 00000000..34e43085 Binary files /dev/null and b/vendor/github.com/rs/zerolog/pretty.png differ diff --git a/vendor/github.com/rs/zerolog/sampler.go b/vendor/github.com/rs/zerolog/sampler.go new file mode 100644 index 00000000..a99629eb --- /dev/null +++ b/vendor/github.com/rs/zerolog/sampler.go @@ -0,0 +1,134 @@ +package zerolog + +import ( + "math/rand" + "sync/atomic" + "time" +) + +var ( + // Often samples log every ~ 10 events. + Often = RandomSampler(10) + // Sometimes samples log every ~ 100 events. + Sometimes = RandomSampler(100) + // Rarely samples log every ~ 1000 events. + Rarely = RandomSampler(1000) +) + +// Sampler defines an interface to a log sampler. +type Sampler interface { + // Sample returns true if the event should be part of the sample, false if + // the event should be dropped. + Sample(lvl Level) bool +} + +// RandomSampler use a PRNG to randomly sample an event out of N events, +// regardless of their level. +type RandomSampler uint32 + +// Sample implements the Sampler interface. +func (s RandomSampler) Sample(lvl Level) bool { + if s <= 0 { + return false + } + if rand.Intn(int(s)) != 0 { + return false + } + return true +} + +// BasicSampler is a sampler that will send every Nth events, regardless of +// there level. +type BasicSampler struct { + N uint32 + counter uint32 +} + +// Sample implements the Sampler interface. 
+func (s *BasicSampler) Sample(lvl Level) bool { + n := s.N + if n == 1 { + return true + } + c := atomic.AddUint32(&s.counter, 1) + return c%n == 1 +} + +// BurstSampler lets Burst events pass per Period then pass the decision to +// NextSampler. If Sampler is not set, all subsequent events are rejected. +type BurstSampler struct { + // Burst is the maximum number of event per period allowed before calling + // NextSampler. + Burst uint32 + // Period defines the burst period. If 0, NextSampler is always called. + Period time.Duration + // NextSampler is the sampler used after the burst is reached. If nil, + // events are always rejected after the burst. + NextSampler Sampler + + counter uint32 + resetAt int64 +} + +// Sample implements the Sampler interface. +func (s *BurstSampler) Sample(lvl Level) bool { + if s.Burst > 0 && s.Period > 0 { + if s.inc() <= s.Burst { + return true + } + } + if s.NextSampler == nil { + return false + } + return s.NextSampler.Sample(lvl) +} + +func (s *BurstSampler) inc() uint32 { + now := time.Now().UnixNano() + resetAt := atomic.LoadInt64(&s.resetAt) + var c uint32 + if now > resetAt { + c = 1 + atomic.StoreUint32(&s.counter, c) + newResetAt := now + s.Period.Nanoseconds() + reset := atomic.CompareAndSwapInt64(&s.resetAt, resetAt, newResetAt) + if !reset { + // Lost the race with another goroutine trying to reset. + c = atomic.AddUint32(&s.counter, 1) + } + } else { + c = atomic.AddUint32(&s.counter, 1) + } + return c +} + +// LevelSampler applies a different sampler for each level. 
+type LevelSampler struct { + TraceSampler, DebugSampler, InfoSampler, WarnSampler, ErrorSampler Sampler +} + +func (s LevelSampler) Sample(lvl Level) bool { + switch lvl { + case TraceLevel: + if s.TraceSampler != nil { + return s.TraceSampler.Sample(lvl) + } + case DebugLevel: + if s.DebugSampler != nil { + return s.DebugSampler.Sample(lvl) + } + case InfoLevel: + if s.InfoSampler != nil { + return s.InfoSampler.Sample(lvl) + } + case WarnLevel: + if s.WarnSampler != nil { + return s.WarnSampler.Sample(lvl) + } + case ErrorLevel: + if s.ErrorSampler != nil { + return s.ErrorSampler.Sample(lvl) + } + } + return true +} diff --git a/vendor/github.com/rs/zerolog/syslog.go b/vendor/github.com/rs/zerolog/syslog.go new file mode 100644 index 00000000..ef3b2c83 --- /dev/null +++ b/vendor/github.com/rs/zerolog/syslog.go @@ -0,0 +1,58 @@ +// +build !windows +// +build !binary_log + +package zerolog + +import ( + "io" +) + +// SyslogWriter is an interface matching a syslog.Writer struct. +type SyslogWriter interface { + io.Writer + Debug(m string) error + Info(m string) error + Warning(m string) error + Err(m string) error + Emerg(m string) error + Crit(m string) error +} + +type syslogWriter struct { + w SyslogWriter +} + +// SyslogLevelWriter wraps a SyslogWriter and call the right syslog level +// method matching the zerolog level. +func SyslogLevelWriter(w SyslogWriter) LevelWriter { + return syslogWriter{w} +} + +func (sw syslogWriter) Write(p []byte) (n int, err error) { + return sw.w.Write(p) +} + +// WriteLevel implements LevelWriter interface. 
+func (sw syslogWriter) WriteLevel(level Level, p []byte) (n int, err error) { + switch level { + case TraceLevel: + case DebugLevel: + err = sw.w.Debug(string(p)) + case InfoLevel: + err = sw.w.Info(string(p)) + case WarnLevel: + err = sw.w.Warning(string(p)) + case ErrorLevel: + err = sw.w.Err(string(p)) + case FatalLevel: + err = sw.w.Emerg(string(p)) + case PanicLevel: + err = sw.w.Crit(string(p)) + case NoLevel: + err = sw.w.Info(string(p)) + default: + panic("invalid level") + } + n = len(p) + return +} diff --git a/vendor/github.com/rs/zerolog/writer.go b/vendor/github.com/rs/zerolog/writer.go new file mode 100644 index 00000000..381b0f61 --- /dev/null +++ b/vendor/github.com/rs/zerolog/writer.go @@ -0,0 +1,98 @@ +package zerolog + +import ( + "io" + "sync" +) + +// LevelWriter defines as interface a writer may implement in order +// to receive level information with payload. +type LevelWriter interface { + io.Writer + WriteLevel(level Level, p []byte) (n int, err error) +} + +type levelWriterAdapter struct { + io.Writer +} + +func (lw levelWriterAdapter) WriteLevel(l Level, p []byte) (n int, err error) { + return lw.Write(p) +} + +type syncWriter struct { + mu sync.Mutex + lw LevelWriter +} + +// SyncWriter wraps w so that each call to Write is synchronized with a mutex. +// This syncer can be used to wrap the call to writer's Write method if it is +// not thread safe. Note that you do not need this wrapper for os.File Write +// operations on POSIX and Windows systems as they are already thread-safe. +func SyncWriter(w io.Writer) io.Writer { + if lw, ok := w.(LevelWriter); ok { + return &syncWriter{lw: lw} + } + return &syncWriter{lw: levelWriterAdapter{w}} +} + +// Write implements the io.Writer interface. +func (s *syncWriter) Write(p []byte) (n int, err error) { + s.mu.Lock() + defer s.mu.Unlock() + return s.lw.Write(p) +} + +// WriteLevel implements the LevelWriter interface. 
+func (s *syncWriter) WriteLevel(l Level, p []byte) (n int, err error) { + s.mu.Lock() + defer s.mu.Unlock() + return s.lw.WriteLevel(l, p) +} + +type multiLevelWriter struct { + writers []LevelWriter +} + +func (t multiLevelWriter) Write(p []byte) (n int, err error) { + for _, w := range t.writers { + n, err = w.Write(p) + if err != nil { + return + } + if n != len(p) { + err = io.ErrShortWrite + return + } + } + return len(p), nil +} + +func (t multiLevelWriter) WriteLevel(l Level, p []byte) (n int, err error) { + for _, w := range t.writers { + n, err = w.WriteLevel(l, p) + if err != nil { + return + } + if n != len(p) { + err = io.ErrShortWrite + return + } + } + return len(p), nil +} + +// MultiLevelWriter creates a writer that duplicates its writes to all the +// provided writers, similar to the Unix tee(1) command. If some writers +// implement LevelWriter, their WriteLevel method will be used instead of Write. +func MultiLevelWriter(writers ...io.Writer) LevelWriter { + lwriters := make([]LevelWriter, 0, len(writers)) + for _, w := range writers { + if lw, ok := w.(LevelWriter); ok { + lwriters = append(lwriters, lw) + } else { + lwriters = append(lwriters, levelWriterAdapter{w}) + } + } + return multiLevelWriter{lwriters} +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 6b3c2e4b..676f85cb 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -283,6 +283,12 @@ github.com/prometheus/procfs/internal/util github.com/rivo/tview # github.com/rivo/uniseg v0.1.0 github.com/rivo/uniseg +# github.com/rs/zerolog v1.20.0 +## explicit +github.com/rs/zerolog +github.com/rs/zerolog/internal/cbor +github.com/rs/zerolog/internal/json +github.com/rs/zerolog/log # github.com/russross/blackfriday/v2 v2.0.1 github.com/russross/blackfriday/v2 # github.com/shurcooL/sanitized_anchor_name v1.0.0 diff --git a/websocket/websocket.go b/websocket/websocket.go index d600cb9d..cbb5d8cf 100644 --- a/websocket/websocket.go +++ b/websocket/websocket.go @@ -1,13 +1,9 @@ 
package websocket import ( - "bufio" "crypto/sha1" "crypto/tls" "encoding/base64" - "encoding/binary" - "encoding/json" - "errors" "io" "net" "net/http" @@ -15,9 +11,9 @@ import ( "time" "github.com/cloudflare/cloudflared/h2mux" - "github.com/cloudflare/cloudflared/logger" - "github.com/cloudflare/cloudflared/sshserver" + "github.com/gorilla/websocket" + "github.com/rs/zerolog" ) const ( @@ -102,31 +98,17 @@ func ClientConnect(req *http.Request, dialler Dialler) (*websocket.Conn, *http.R return conn, response, err } -// HijackConnection takes over an HTTP connection. Caller is responsible for closing connection. -func HijackConnection(w http.ResponseWriter) (net.Conn, *bufio.ReadWriter, error) { - hj, ok := w.(http.Hijacker) - if !ok { - return nil, nil, errors.New("hijack error") - } - - conn, brw, err := hj.Hijack() - if err != nil { - return nil, nil, err - } - return conn, brw, nil -} - // Stream copies copy data to & from provided io.ReadWriters. func Stream(conn, backendConn io.ReadWriter) { proxyDone := make(chan struct{}, 2) go func() { - io.Copy(conn, backendConn) + _, _ = io.Copy(conn, backendConn) proxyDone <- struct{}{} }() go func() { - io.Copy(backendConn, conn) + _, _ = io.Copy(backendConn, conn) proxyDone <- struct{}{} }() @@ -142,14 +124,20 @@ func DefaultStreamHandler(wsConn *Conn, remoteConn net.Conn, _ http.Header) { // StartProxyServer will start a websocket server that will decode // the websocket data and write the resulting data to the provided -func StartProxyServer(logger logger.Service, listener net.Listener, staticHost string, shutdownC <-chan struct{}, streamHandler func(wsConn *Conn, remoteConn net.Conn, requestHeaders http.Header)) error { +func StartProxyServer( + log *zerolog.Logger, + listener net.Listener, + staticHost string, + shutdownC <-chan struct{}, + streamHandler func(wsConn *Conn, remoteConn net.Conn, requestHeaders http.Header), +) error { upgrader := websocket.Upgrader{ ReadBufferSize: 1024, WriteBufferSize: 1024, } h 
:= handler{ upgrader: upgrader, - logger: logger, + log: log, staticHost: staticHost, streamHandler: streamHandler, } @@ -157,7 +145,7 @@ func StartProxyServer(logger logger.Service, listener net.Listener, staticHost s httpServer := &http.Server{Addr: listener.Addr().String(), Handler: &h} go func() { <-shutdownC - httpServer.Close() + _ = httpServer.Close() }() return httpServer.Serve(listener) @@ -165,7 +153,7 @@ func StartProxyServer(logger logger.Service, listener net.Listener, staticHost s // HTTP handler for the websocket proxy. type handler struct { - logger logger.Service + log *zerolog.Logger staticHost string upgrader websocket.Upgrader streamHandler func(wsConn *Conn, remoteConn net.Conn, requestHeaders http.Header) @@ -176,7 +164,7 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { finalDestination := h.staticHost if finalDestination == "" { if jumpDestination := r.Header.Get(h2mux.CFJumpDestinationHeader); jumpDestination == "" { - h.logger.Error("Did not receive final destination from client. The --destination flag is likely not set") + h.log.Error().Msg("Did not receive final destination from client. 
The --destination flag is likely not set") return } else { finalDestination = jumpDestination @@ -185,58 +173,32 @@ func (h *handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { stream, err := net.Dial("tcp", finalDestination) if err != nil { - h.logger.Errorf("Cannot connect to remote: %s", err) + h.log.Error().Msgf("Cannot connect to remote: %s", err) return } defer stream.Close() if !websocket.IsWebSocketUpgrade(r) { - w.Write(nonWebSocketRequestPage()) + _, _ = w.Write(nonWebSocketRequestPage()) return } conn, err := h.upgrader.Upgrade(w, r, nil) if err != nil { - h.logger.Errorf("failed to upgrade: %s", err) + h.log.Error().Msgf("failed to upgrade: %s", err) return } - conn.SetReadDeadline(time.Now().Add(pongWait)) - conn.SetPongHandler(func(string) error { conn.SetReadDeadline(time.Now().Add(pongWait)); return nil }) + _ = conn.SetReadDeadline(time.Now().Add(pongWait)) + conn.SetPongHandler(func(string) error { _ = conn.SetReadDeadline(time.Now().Add(pongWait)); return nil }) done := make(chan struct{}) - go pinger(h.logger, conn, done) + go pinger(h.log, conn, done) defer func() { done <- struct{}{} - conn.Close() + _ = conn.Close() }() h.streamHandler(&Conn{conn}, stream, r.Header) } -// SendSSHPreamble sends the final SSH destination address to the cloudflared SSH proxy -// The destination is preceded by its length -// Not part of sshserver module to fix compilation for incompatible operating systems -func SendSSHPreamble(stream net.Conn, destination, token string) error { - preamble := sshserver.SSHPreamble{Destination: destination, JWT: token} - payload, err := json.Marshal(preamble) - if err != nil { - return err - } - - if uint16(len(payload)) > ^uint16(0) { - return errors.New("ssh preamble payload too large") - } - - sizeBytes := make([]byte, sshserver.SSHPreambleLength) - binary.BigEndian.PutUint16(sizeBytes, uint16(len(payload))) - if _, err := stream.Write(sizeBytes); err != nil { - return err - } - - if _, err := stream.Write(payload); 
err != nil { - return err - } - return nil -} - // the gorilla websocket library sets its own Upgrade, Connection, Sec-WebSocket-Key, // Sec-WebSocket-Version and Sec-Websocket-Extensions headers. // https://github.com/gorilla/websocket/blob/master/client.go#L189-L194. @@ -256,7 +218,7 @@ func websocketHeaders(req *http.Request) http.Header { // sha1Base64 sha1 and then base64 encodes str. func sha1Base64(str string) string { hasher := sha1.New() - io.WriteString(hasher, str) + _, _ = io.WriteString(hasher, str) hash := hasher.Sum(nil) return base64.StdEncoding.EncodeToString(hash) } @@ -283,14 +245,14 @@ func ChangeRequestScheme(reqURL *url.URL) string { } // pinger simulates the websocket connection to keep it alive -func pinger(logger logger.Service, ws *websocket.Conn, done chan struct{}) { +func pinger(logger *zerolog.Logger, ws *websocket.Conn, done chan struct{}) { ticker := time.NewTicker(pingPeriod) defer ticker.Stop() for { select { case <-ticker.C: if err := ws.WriteControl(websocket.PingMessage, []byte{}, time.Now().Add(writeWait)); err != nil { - logger.Debugf("failed to send ping message: %s", err) + logger.Debug().Msgf("failed to send ping message: %s", err) } case <-done: return diff --git a/websocket/websocket_test.go b/websocket/websocket_test.go index 3a94ed51..5b57b6c2 100644 --- a/websocket/websocket_test.go +++ b/websocket/websocket_test.go @@ -3,14 +3,15 @@ package websocket import ( "crypto/tls" "crypto/x509" + "github.com/rs/zerolog" "io" "math/rand" "net/http" "testing" "github.com/cloudflare/cloudflared/hello" - "github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/tlsconfig" + "github.com/stretchr/testify/assert" "golang.org/x/net/websocket" ) @@ -62,7 +63,7 @@ func TestGenerateAcceptKey(t *testing.T) { } func TestServe(t *testing.T) { - logger := logger.NewOutputWriter(logger.NewMockWriteManager()) + log := zerolog.Nop() shutdownC := make(chan struct{}) errC := make(chan error) listener, err := 
hello.CreateTLSListener("localhost:1111") @@ -70,7 +71,7 @@ func TestServe(t *testing.T) { defer listener.Close() go func() { - errC <- hello.StartHelloWorldServer(logger, listener, shutdownC) + errC <- hello.StartHelloWorldServer(&log, listener, shutdownC) }() req := testRequest(t, "https://localhost:1111/ws", nil) @@ -96,7 +97,7 @@ func TestServe(t *testing.T) { assert.Equal(t, clientMessage, message) } - conn.Close() + _ = conn.Close() close(shutdownC) <-errC } @@ -106,7 +107,7 @@ func TestServe(t *testing.T) { // remoteAddress := "localhost:1113" // listenerAddress := "localhost:1112" // message := "Good morning Austin! Time for another sunny day in the great state of Texas." -// logger := logger.NewOutputWriter(logger.NewMockWriteManager()) +// logger := zerolog.Nop() // shutdownC := make(chan struct{}) // listener, err := net.Listen("tcp", listenerAddress)