diff --git a/Gopkg.lock b/Gopkg.lock index 26a0104b..1a99b7a3 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -220,6 +220,14 @@ revision = "2eee05ed794112d45db504eb05aa693efd2b8b09" version = "v0.1.0" +[[projects]] + digest = "1:31e761d97c76151dde79e9d28964a812c46efc5baee4085b86f68f0c654450de" + name = "github.com/konsorten/go-windows-terminal-sequences" + packages = ["."] + pruneopts = "UT" + revision = "f55edac94c9bbba5d6182a4be46d86a2c9b5b50e" + version = "v1.0.2" + [[projects]] digest = "1:bc1c0be40c67b6b4aee09d7508d5a2a52c1c116b1fa43806dad2b0d6b4d4003b" name = "github.com/lib/pq" @@ -359,12 +367,12 @@ version = "v2.4" [[projects]] - digest = "1:5f2aaa360f48d1711795bd88c7e45a38f86cf81e4bc01453d20983baa67e2d51" + digest = "1:04457f9f6f3ffc5fea48e71d62f2ca256637dee0a04d710288e27e05c8b41976" name = "github.com/sirupsen/logrus" packages = ["."] pruneopts = "UT" - revision = "f006c2ac4710855cf0f916dd6b77acf6b048dc6e" - version = "v1.0.3" + revision = "839c75faf7f98a33d445d181f3018b5c3409a45e" + version = "v1.4.2" [[projects]] digest = "1:f85e109eda8f6080877185d1c39e98dd8795e1780c08beca28304b87fd855a1c" diff --git a/Gopkg.toml b/Gopkg.toml index d90a3dc9..bc97202a 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -26,7 +26,7 @@ [[constraint]] name = "github.com/sirupsen/logrus" - version = "=1.0.3" + version = "=1.4.2" [[constraint]] name = "github.com/stretchr/testify" diff --git a/Makefile b/Makefile index da63e656..402f3af6 100644 --- a/Makefile +++ b/Makefile @@ -78,6 +78,6 @@ tunnelrpc/tunnelrpc.capnp.go: tunnelrpc/tunnelrpc.capnp .PHONY: vet vet: - go vet ./... + go vet -composites=false ./... which go-sumtype # go get github.com/BurntSushi/go-sumtype go-sumtype $$(go list ./...) diff --git a/RELEASE_NOTES b/RELEASE_NOTES index 9a67fc41..e22c2d1c 100644 --- a/RELEASE_NOTES +++ b/RELEASE_NOTES @@ -1,3 +1,18 @@ +2019.7.0 +- 2019-05-28 TUN-1913: Define OriginService for each type of origin +- 2019-04-29 Build a docker container +- 2019-06-12 TUN-1952: Group ClientConfig fields by the component that uses the config, and return the part of the config that failed to be applied +- 2019-06-05 TUN-1893: Proxy requests to the origin based on tunnel hostname +- 2019-06-17 TUN-1961: Create EdgeConnectionManager to maintain outbound connections to the edge +- 2019-06-18 TUN-1885: Reconfigure cloudflared on receiving new ClientConfig +- 2019-06-19 TUN-1976: Pass tunnel hostname through header +- 2019-06-20 TUN-1982: Load custom origin CA when OriginCAPool is specified +- 2019-06-26 TUN-2005: Upgrade logrus +- 2019-06-20 TUN-1981: Write response header & body on proxy error to notify eyeballs of failure category +- 2019-06-20 TUN-1977: Validate OriginConfig has valid URL, and use scheme to determine if a HTTPOriginService is expecting HTTP or Unix +- 2019-06-13 DoH: change the media type to application/dns-message +- 2019-06-26 AUTH-1736: Better handling of token revocation + 2019.6.0 - 2019-05-17 TUN-1828: Update declarative tunnel config struct - 2019-05-29 Handle exit code on err diff --git a/carrier/carrier.go b/carrier/carrier.go index 71541ab4..e5741169 100644 --- a/carrier/carrier.go +++ b/carrier/carrier.go @@ -4,7 +4,6 @@ package carrier import ( - "errors" "io" "net" "net/http" @@ -12,7 +11,8 @@ import ( "strings" "github.com/cloudflare/cloudflared/cmd/cloudflared/token" - "github.com/cloudflare/cloudflared/websocket" + cloudflaredWebsocket "github.com/cloudflare/cloudflared/websocket" + "github.com/gorilla/websocket" "github.com/sirupsen/logrus" ) @@ -37,6 +37,13 @@ func (c *StdinoutStream) Write(p 
[]byte) (int, error) { return os.Stdout.Write(p) } +// Helper to allow defering the response close with a check that the resp is not nil +func closeRespBody(resp *http.Response) { + if resp != nil { + resp.Body.Close() + } +} + // StartClient will copy the data from stdin/stdout over a WebSocket connection // to the edge (originURL) func StartClient(logger *logrus.Logger, stream io.ReadWriter, options *StartOptions) error { @@ -90,7 +97,7 @@ func serveStream(logger *logrus.Logger, conn io.ReadWriter, options *StartOption } defer wsConn.Close() - websocket.Stream(wsConn, conn) + cloudflaredWebsocket.Stream(wsConn, conn) return nil } @@ -98,28 +105,17 @@ func serveStream(logger *logrus.Logger, conn io.ReadWriter, options *StartOption // createWebsocketStream will create a WebSocket connection to stream data over // It also handles redirects from Access and will present that flow if // the token is not present on the request -func createWebsocketStream(options *StartOptions) (*websocket.Conn, error) { +func createWebsocketStream(options *StartOptions) (*cloudflaredWebsocket.Conn, error) { req, err := http.NewRequest(http.MethodGet, options.OriginURL, nil) if err != nil { return nil, err } req.Header = options.Headers - wsConn, resp, err := websocket.ClientConnect(req, nil) - if err != nil && resp != nil && resp.StatusCode > 300 { - location, err := resp.Location() - if err != nil { - return nil, err - } - if !strings.Contains(location.String(), "cdn-cgi/access/login") { - return nil, errors.New("not an Access redirect") - } - req, err := buildAccessRequest(options.OriginURL) - if err != nil { - return nil, err - } - - wsConn, _, err = websocket.ClientConnect(req, nil) + wsConn, resp, err := cloudflaredWebsocket.ClientConnect(req, nil) + defer closeRespBody(resp) + if err != nil && isAccessResponse(resp) { + wsConn, err = createAccessAuthenticatedStream(options) if err != nil { return nil, err } @@ -127,12 +123,72 @@ func createWebsocketStream(options *StartOptions) (*websocket.Conn, error) { return nil, err } - return &websocket.Conn{Conn: wsConn}, nil + return &cloudflaredWebsocket.Conn{Conn: wsConn}, nil +} + +// isAccessResponse checks the http Response to see if the url location +// contains the Access structure. +func isAccessResponse(resp *http.Response) bool { + if resp == nil || resp.StatusCode <= 300 { + return false + } + + location, err := resp.Location() + if err != nil || location == nil { + return false + } + if strings.HasPrefix(location.Path, "/cdn-cgi/access/login") { + return true + } + + return false +} + +// createAccessAuthenticatedStream will try load a token from storage and make +// a connection with the token set on the request. If it still get redirect, +// this probably means the token in storage is invalid (expired/revoked). If that +// happens it deletes the token and runs the connection again, so the user can +// login again and generate a new one. +func createAccessAuthenticatedStream(options *StartOptions) (*websocket.Conn, error) { + wsConn, resp, err := createAccessWebSocketStream(options) + defer closeRespBody(resp) + if err == nil { + return wsConn, nil + } + + if !isAccessResponse(resp) { + return nil, err + } + + // Access Token is invalid for some reason. 
Go through regen flow + originReq, err := http.NewRequest(http.MethodGet, options.OriginURL, nil) + if err != nil { + return nil, err + } + if err := token.RemoveTokenIfExists(originReq.URL); err != nil { + return nil, err + } + wsConn, resp, err = createAccessWebSocketStream(options) + defer closeRespBody(resp) + if err != nil { + return nil, err + } + + return wsConn, nil +} + +// createAccessWebSocketStream builds an Access request and makes a connection +func createAccessWebSocketStream(options *StartOptions) (*websocket.Conn, *http.Response, error) { + req, err := buildAccessRequest(options) + if err != nil { + return nil, nil, err + } + return cloudflaredWebsocket.ClientConnect(req, nil) } // buildAccessRequest builds an HTTP request with the Access token set -func buildAccessRequest(originURL string) (*http.Request, error) { - req, err := http.NewRequest(http.MethodGet, originURL, nil) +func buildAccessRequest(options *StartOptions) (*http.Request, error) { + req, err := http.NewRequest(http.MethodGet, options.OriginURL, nil) if err != nil { return nil, err } @@ -144,11 +200,17 @@ func buildAccessRequest(originURL string) (*http.Request, error) { // We need to create a new request as FetchToken will modify req (boo mutable) // as it has to follow redirect on the API and such, so here we init a new one - originRequest, err := http.NewRequest(http.MethodGet, originURL, nil) + originRequest, err := http.NewRequest(http.MethodGet, options.OriginURL, nil) if err != nil { return nil, err } originRequest.Header.Set("cf-access-token", token) + for k, v := range options.Headers { + if len(v) >= 1 { + originRequest.Header.Set(k, v[0]) + } + } + return originRequest, nil } diff --git a/carrier/carrier_test.go b/carrier/carrier_test.go index e4c3d520..5360f092 100644 --- a/carrier/carrier_test.go +++ b/carrier/carrier_test.go @@ -91,6 +91,31 @@ func TestStartServer(t *testing.T) { assert.Equal(t, string(readBuffer), message) } +func TestIsAccessResponse(t *testing.T) { + validLocationHeader := http.Header{} + validLocationHeader.Add("location", "https://test.cloudflareaccess.com/cdn-cgi/access/login/blahblah") + invalidLocationHeader := http.Header{} + invalidLocationHeader.Add("location", "https://google.com") + testCases := []struct { + Description string + In *http.Response + ExpectedOut bool + }{ + {"nil response", nil, false}, + {"redirect with no location", &http.Response{StatusCode: http.StatusPermanentRedirect}, false}, + {"200 ok", &http.Response{StatusCode: http.StatusOK}, false}, + {"redirect with location", &http.Response{StatusCode: http.StatusPermanentRedirect, Header: validLocationHeader}, true}, + {"redirect with invalid location", &http.Response{StatusCode: http.StatusPermanentRedirect, Header: invalidLocationHeader}, false}, + } + + for i, tc := range testCases { + if isAccessResponse(tc.In) != tc.ExpectedOut { + t.Fatalf("Failed case %d -- %s", i, tc.Description) + } + } + +} + func newTestWebSocketServer() *httptest.Server { upgrader := ws.Upgrader{ ReadBufferSize: 1024, diff --git a/cmd/cloudflared/buildinfo/build_info.go b/cmd/cloudflared/buildinfo/build_info.go new file mode 100644 index 00000000..80481716 --- /dev/null +++ b/cmd/cloudflared/buildinfo/build_info.go @@ -0,0 +1,28 @@ +package buildinfo + +import ( + "runtime" + + "github.com/sirupsen/logrus" +) + +type BuildInfo struct { + GoOS string `json:"go_os"` + GoVersion string `json:"go_version"` + GoArch string `json:"go_arch"` + CloudflaredVersion string `json:"cloudflared_version"` +} + +func 
GetBuildInfo(cloudflaredVersion string) *BuildInfo { + return &BuildInfo{ + GoOS: runtime.GOOS, + GoVersion: runtime.Version(), + GoArch: runtime.GOARCH, + CloudflaredVersion: cloudflaredVersion, + } +} + +func (bi *BuildInfo) Log(logger *logrus.Logger) { + logger.Infof("Version %s", bi.CloudflaredVersion) + logger.Infof("GOOS: %s, GOVersion: %s, GoArch: %s", bi.GoOS, bi.GoVersion, bi.GoArch) +} diff --git a/cmd/cloudflared/token/token.go b/cmd/cloudflared/token/token.go index a7655e4f..8a17076f 100644 --- a/cmd/cloudflared/token/token.go +++ b/cmd/cloudflared/token/token.go @@ -1,15 +1,18 @@ package token import ( + "context" + "fmt" "io/ioutil" "net/url" - "time" + "os" + "github.com/cloudflare/cloudflared/cmd/cloudflared/config" "github.com/cloudflare/cloudflared/cmd/cloudflared/path" "github.com/cloudflare/cloudflared/cmd/cloudflared/transfer" "github.com/cloudflare/cloudflared/log" + "github.com/cloudflare/cloudflared/origin" "github.com/coreos/go-oidc/jose" - "github.com/coreos/go-oidc/oidc" ) const ( @@ -18,6 +21,58 @@ const ( var logger = log.CreateLogger() +type lock struct { + lockFilePath string + backoff *origin.BackoffHandler +} + +func errDeleteTokenFailed(lockFilePath string) error { + return fmt.Errorf("failed to acquire a new Access token. Please try to delete %s", lockFilePath) +} + +// newLock will get a new file lock +func newLock(path string) *lock { + lockPath := path + ".lock" + return &lock{ + lockFilePath: lockPath, + backoff: &origin.BackoffHandler{MaxRetries: 7}, + } +} + +func (l *lock) Acquire() error { + // Check for a path.lock file + // if the lock file exists; start polling + // if not, create the lock file and go through the normal flow. + // See AUTH-1736 for the reason why we do all this + for isTokenLocked(l.lockFilePath) { + if l.backoff.Backoff(context.Background()) { + continue + } else { + return errDeleteTokenFailed(l.lockFilePath) + } + } + + // Create a lock file so other processes won't also try to get the token at + // the same time + if err := ioutil.WriteFile(l.lockFilePath, []byte{}, 0600); err != nil { + return err + } + return nil +} + +func (l *lock) Release() error { + if err := os.Remove(l.lockFilePath); err != nil && !os.IsNotExist(err) { + return errDeleteTokenFailed(l.lockFilePath) + } + return nil +} + +// isTokenLocked checks to see if there is another process attempting to get the token already +func isTokenLocked(lockFilePath string) bool { + exists, err := config.FileExists(lockFilePath) + return exists && err == nil +} + // FetchToken will either load a stored token or generate a new one func FetchToken(appURL *url.URL) (string, error) { if token, err := GetTokenIfExists(appURL); token != "" && err == nil { @@ -29,6 +84,18 @@ func FetchToken(appURL *url.URL) (string, error) { return "", err } + lock := newLock(path) + err = lock.Acquire() + if err != nil { + return "", err + } + defer lock.Release() + + // check to see if another process has gotten a token while we waited for the lock + if token, err := GetTokenIfExists(appURL); token != "" && err == nil { + return token, nil + } + // this weird parameter is the resource name (token) and the key/value // we want to send to the transfer service. 
the key is token and the value // is blank (basically just the id generated in the transfer service) @@ -55,14 +122,18 @@ func GetTokenIfExists(url *url.URL) (string, error) { return "", err } - claims, err := token.Claims() - if err != nil { - return "", err - } - ident, err := oidc.IdentityFromClaims(claims) - // AUTH-1404, reauth if the token is about to expire within 15 minutes - if err == nil && ident.ExpiresAt.After(time.Now().Add(time.Minute*15)) { - return token.Encode(), nil - } - return "", err + return token.Encode(), nil +} + +// RemoveTokenIfExists removes the a token from local storage if it exists +func RemoveTokenIfExists(url *url.URL) error { + path, err := path.GenerateFilePathFromURL(url, keyName) + if err != nil { + return err + } + if err := os.Remove(path); err != nil && !os.IsNotExist(err) { + return err + } + + return nil } diff --git a/cmd/cloudflared/tunnel/cmd.go b/cmd/cloudflared/tunnel/cmd.go index 9888b5aa..3b925765 100644 --- a/cmd/cloudflared/tunnel/cmd.go +++ b/cmd/cloudflared/tunnel/cmd.go @@ -1,6 +1,7 @@ package tunnel import ( + "context" "fmt" "io/ioutil" "net" @@ -11,9 +12,17 @@ import ( "syscall" "time" + "github.com/cloudflare/cloudflared/h2mux" + "github.com/cloudflare/cloudflared/tunnelrpc/pogs" + + "github.com/cloudflare/cloudflared/connection" + "github.com/cloudflare/cloudflared/supervisor" + "github.com/google/uuid" + "github.com/getsentry/raven-go" "golang.org/x/crypto/ssh/terminal" + "github.com/cloudflare/cloudflared/cmd/cloudflared/buildinfo" "github.com/cloudflare/cloudflared/cmd/cloudflared/config" "github.com/cloudflare/cloudflared/cmd/cloudflared/updater" "github.com/cloudflare/cloudflared/cmd/sqlgateway" @@ -235,9 +244,8 @@ func StartServer(c *cli.Context, version string, shutdownC, graceShutdownC chan return err } - buildInfo := origin.GetBuildInfo() - logger.Infof("Build info: %+v", *buildInfo) - logger.Infof("Version %s", version) + buildInfo := buildinfo.GetBuildInfo(version) + buildInfo.Log(logger) logClientOptions(c) if c.IsSet("proxy-dns") { @@ -253,16 +261,6 @@ func StartServer(c *cli.Context, version string, shutdownC, graceShutdownC chan // Wait for proxy-dns to come up (if used) <-dnsReadySignal - // update needs to be after DNS proxy is up to resolve equinox server address - if updater.IsAutoupdateEnabled(c) { - logger.Infof("Autoupdate frequency is set to %v", c.Duration("autoupdate-freq")) - wg.Add(1) - go func() { - defer wg.Done() - errC <- updater.Autoupdate(c.Duration("autoupdate-freq"), &listeners, shutdownC) - }() - } - metricsListener, err := listeners.Listen("tcp", c.String("metrics")) if err != nil { logger.WithError(err).Error("Error opening metrics server listener") @@ -280,6 +278,33 @@ func StartServer(c *cli.Context, version string, shutdownC, graceShutdownC chan go writePidFile(connectedSignal, c.String("pidfile")) } + cloudflaredID, err := uuid.NewRandom() + if err != nil { + logger.WithError(err).Error("Cannot generate cloudflared ID") + return err + } + + ctx, cancel := context.WithCancel(context.Background()) + go func() { + <-shutdownC + cancel() + }() + + if c.IsSet("use-declarative-tunnels") { + return startDeclarativeTunnel(ctx, c, cloudflaredID, buildInfo, &listeners) + } + + // update needs to be after DNS proxy is up to resolve equinox server address + if updater.IsAutoupdateEnabled(c) { + logger.Infof("Autoupdate frequency is set to %v", c.Duration("autoupdate-freq")) + wg.Add(1) + go func() { + defer wg.Done() + autoupdater := updater.NewAutoUpdater(c.Duration("autoupdate-freq"), &listeners) + 
errC <- autoupdater.Run(ctx) + }() + } + // Serve DNS proxy stand-alone if no hostname or tag or app is going to run if dnsProxyStandAlone(c) { connectedSignal.Notify() @@ -288,6 +313,7 @@ func StartServer(c *cli.Context, version string, shutdownC, graceShutdownC chan } if c.IsSet("hello-world") { + logger.Infof("hello-world set") helloListener, err := hello.CreateTLSListener("127.0.0.1:") if err != nil { logger.WithError(err).Error("Cannot start Hello World Server") @@ -324,7 +350,7 @@ func StartServer(c *cli.Context, version string, shutdownC, graceShutdownC chan wg.Add(1) go func() { defer wg.Done() - errC <- origin.StartTunnelDaemon(tunnelConfig, graceShutdownC, connectedSignal) + errC <- origin.StartTunnelDaemon(ctx, tunnelConfig, connectedSignal, cloudflaredID) }() return waitToShutdown(&wg, errC, shutdownC, graceShutdownC, c.Duration("grace-period")) @@ -349,6 +375,110 @@ func Before(c *cli.Context) error { return nil } +func startDeclarativeTunnel(ctx context.Context, + c *cli.Context, + cloudflaredID uuid.UUID, + buildInfo *buildinfo.BuildInfo, + listeners *gracenet.Net, +) error { + reverseProxyOrigin, err := defaultOriginConfig(c) + if err != nil { + logger.WithError(err) + return err + } + defaultClientConfig := &pogs.ClientConfig{ + Version: pogs.InitVersion(), + SupervisorConfig: &pogs.SupervisorConfig{ + AutoUpdateFrequency: c.Duration("autoupdate-freq"), + MetricsUpdateFrequency: c.Duration("metrics-update-freq"), + GracePeriod: c.Duration("grace-period"), + }, + EdgeConnectionConfig: &pogs.EdgeConnectionConfig{ + NumHAConnections: uint8(c.Int("ha-connections")), + HeartbeatInterval: c.Duration("heartbeat-interval"), + Timeout: c.Duration("dial-edge-timeout"), + MaxFailedHeartbeats: c.Uint64("heartbeat-count"), + }, + DoHProxyConfigs: []*pogs.DoHProxyConfig{}, + ReverseProxyConfigs: []*pogs.ReverseProxyConfig{ + { + TunnelHostname: h2mux.TunnelHostname(c.String("hostname")), + Origin: reverseProxyOrigin, + }, + }, + } + + autoupdater := updater.NewAutoUpdater(defaultClientConfig.SupervisorConfig.AutoUpdateFrequency, listeners) + + originCert, err := getOriginCert(c) + if err != nil { + logger.WithError(err).Error("error getting origin cert") + return err + } + toEdgeTLSConfig, err := tlsconfig.CreateTunnelConfig(c) + if err != nil { + logger.WithError(err).Error("unable to create TLS config to connect with edge") + return err + } + + tags, err := NewTagSliceFromCLI(c.StringSlice("tag")) + if err != nil { + logger.WithError(err).Error("unable to parse tag") + return err + } + + cloudflaredConfig := &connection.CloudflaredConfig{ + CloudflaredID: cloudflaredID, + Tags: tags, + BuildInfo: buildInfo, + } + + serviceDiscoverer, err := serviceDiscoverer(c, logger) + if err != nil { + logger.WithError(err).Error("unable to create service discoverer") + return err + } + supervisor, err := supervisor.NewSupervisor(defaultClientConfig, originCert, toEdgeTLSConfig, + serviceDiscoverer, cloudflaredConfig, autoupdater, updater.SupportAutoUpdate(), logger) + if err != nil { + logger.WithError(err).Error("unable to create Supervisor") + return err + } + return supervisor.Run(ctx) +} + +func defaultOriginConfig(c *cli.Context) (pogs.OriginConfig, error) { + if c.IsSet("hello-world") { + return &pogs.HelloWorldOriginConfig{}, nil + } + originConfig := &pogs.HTTPOriginConfig{ + TCPKeepAlive: c.Duration("proxy-tcp-keepalive"), + DialDualStack: !c.Bool("proxy-no-happy-eyeballs"), + TLSHandshakeTimeout: c.Duration("proxy-tls-timeout"), + TLSVerify: !c.Bool("no-tls-verify"), + OriginCAPool: 
c.String("origin-ca-pool"), + OriginServerName: c.String("origin-server-name"), + MaxIdleConnections: c.Uint64("proxy-keepalive-connections"), + IdleConnectionTimeout: c.Duration("proxy-keepalive-timeout"), + ProxyConnectionTimeout: c.Duration("proxy-connection-timeout"), + ExpectContinueTimeout: c.Duration("proxy-expect-continue-timeout"), + ChunkedEncoding: c.Bool("no-chunked-encoding"), + } + if c.IsSet("unix-socket") { + unixSocket, err := config.ValidateUnixSocket(c) + if err != nil { + return nil, errors.Wrap(err, "error validating --unix-socket") + } + originConfig.URLString = unixSocket + } + originAddr, err := config.ValidateUrl(c) + if err != nil { + return nil, errors.Wrap(err, "error validating origin URL") + } + originConfig.URLString = originAddr + return originConfig, nil +} + func waitToShutdown(wg *sync.WaitGroup, errC chan error, shutdownC, graceShutdownC chan struct{}, @@ -422,8 +552,8 @@ func tunnelFlags(shouldHide bool) []cli.Flag { }, altsrc.NewDurationFlag(&cli.DurationFlag{ Name: "autoupdate-freq", - Usage: "Autoupdate frequency. Default is 24h.", - Value: time.Hour * 24, + Usage: fmt.Sprintf("Autoupdate frequency. Default is %v.", updater.DefaultCheckUpdateFreq), + Value: updater.DefaultCheckUpdateFreq, Hidden: shouldHide, }), altsrc.NewBoolFlag(&cli.BoolFlag{ @@ -643,6 +773,18 @@ func tunnelFlags(shouldHide bool) []cli.Flag { Value: time.Second * 90, Hidden: shouldHide, }), + altsrc.NewDurationFlag(&cli.DurationFlag{ + Name: "proxy-connection-timeout", + Usage: "HTTP proxy timeout for closing an idle connection", + Value: time.Second * 90, + Hidden: shouldHide, + }), + altsrc.NewDurationFlag(&cli.DurationFlag{ + Name: "proxy-expect-continue-timeout", + Usage: "HTTP proxy timeout for closing an idle connection", + Value: time.Second * 90, + Hidden: shouldHide, + }), altsrc.NewBoolFlag(&cli.BoolFlag{ Name: "proxy-dns", Usage: "Run a DNS over HTTPS proxy server.", @@ -702,5 +844,12 @@ func tunnelFlags(shouldHide bool) []cli.Flag { EnvVars: []string{"TUNNEL_USE_DECLARATIVE"}, Hidden: true, }), + altsrc.NewDurationFlag(&cli.DurationFlag{ + Name: "dial-edge-timeout", + Usage: "Maximum wait time to set up a connection with the edge", + Value: time.Second * 15, + EnvVars: []string{"DIAL_EDGE_TIMEOUT"}, + Hidden: true, + }), } } diff --git a/cmd/cloudflared/tunnel/configuration.go b/cmd/cloudflared/tunnel/configuration.go index c682e49d..9fddcf7a 100644 --- a/cmd/cloudflared/tunnel/configuration.go +++ b/cmd/cloudflared/tunnel/configuration.go @@ -12,7 +12,9 @@ import ( "strings" "time" + "github.com/cloudflare/cloudflared/cmd/cloudflared/buildinfo" "github.com/cloudflare/cloudflared/cmd/cloudflared/config" + "github.com/cloudflare/cloudflared/connection" "github.com/cloudflare/cloudflared/origin" "github.com/cloudflare/cloudflared/tlsconfig" tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" @@ -145,7 +147,7 @@ If you don't have a certificate signed by Cloudflare, run the command: func prepareTunnelConfig( c *cli.Context, - buildInfo *origin.BuildInfo, + buildInfo *buildinfo.BuildInfo, version string, logger, transportLogger *logrus.Logger, ) (*origin.TunnelConfig, error) { @@ -273,6 +275,15 @@ func prepareTunnelConfig( }, nil } +func serviceDiscoverer(c *cli.Context, logger *logrus.Logger) (connection.EdgeServiceDiscoverer, error) { + // If --edge is specfied, resolve edge server addresses + if len(c.StringSlice("edge")) > 0 { + return connection.NewEdgeHostnameResolver(c.StringSlice("edge")) + } + // Otherwise lookup edge server addresses through service 
discovery + return connection.NewEdgeAddrResolver(logger) +} + func isRunningFromTerminal() bool { return terminal.IsTerminal(int(os.Stdout.Fd())) } diff --git a/cmd/cloudflared/updater/update.go b/cmd/cloudflared/updater/update.go index e34fb0cc..96cdc95b 100644 --- a/cmd/cloudflared/updater/update.go +++ b/cmd/cloudflared/updater/update.go @@ -1,6 +1,7 @@ package updater import ( + "context" "os" "runtime" "time" @@ -14,6 +15,7 @@ import ( ) const ( + DefaultCheckUpdateFreq = time.Hour * 24 appID = "app_idCzgxYerVD" noUpdateInShellMessage = "cloudflared will not automatically update when run from the shell. To enable auto-updates, run cloudflared as a service: https://developers.cloudflare.com/argo-tunnel/reference/service/" noUpdateOnWindowsMessage = "cloudflared will not automatically update on Windows systems." @@ -75,30 +77,6 @@ func Update(_ *cli.Context) error { return updateOutcome.Error } -func Autoupdate(freq time.Duration, listeners *gracenet.Net, shutdownC chan struct{}) error { - tickC := time.Tick(freq) - for { - updateOutcome := loggedUpdate() - if updateOutcome.Updated { - os.Args = append(os.Args, "--is-autoupdated=true") - pid, err := listeners.StartProcess() - if err != nil { - logger.WithError(err).Error("Unable to restart server automatically") - return err - } - // stop old process after autoupdate. Otherwise we create a new process - // after each update - logger.Infof("PID of the new process is %d", pid) - return nil - } - select { - case <-tickC: - case <-shutdownC: - return nil - } - } -} - // Checks for an update and applies it if one is available func loggedUpdate() UpdateOutcome { updateOutcome := checkForUpdateAndApply() @@ -112,7 +90,88 @@ func loggedUpdate() UpdateOutcome { return updateOutcome } +// AutoUpdater periodically checks for new version of cloudflared. +type AutoUpdater struct { + configurable *configurable + listeners *gracenet.Net + updateConfigChan chan *configurable +} + +// AutoUpdaterConfigurable is the attributes of AutoUpdater that can be reconfigured during runtime +type configurable struct { + enabled bool + freq time.Duration +} + +func NewAutoUpdater(freq time.Duration, listeners *gracenet.Net) *AutoUpdater { + updaterConfigurable := &configurable{ + enabled: true, + freq: freq, + } + if freq == 0 { + updaterConfigurable.enabled = false + updaterConfigurable.freq = DefaultCheckUpdateFreq + } + return &AutoUpdater{ + configurable: updaterConfigurable, + listeners: listeners, + updateConfigChan: make(chan *configurable), + } +} + +func (a *AutoUpdater) Run(ctx context.Context) error { + ticker := time.NewTicker(a.configurable.freq) + for { + if a.configurable.enabled { + updateOutcome := loggedUpdate() + if updateOutcome.Updated { + os.Args = append(os.Args, "--is-autoupdated=true") + pid, err := a.listeners.StartProcess() + if err != nil { + logger.WithError(err).Error("Unable to restart server automatically") + return err + } + // stop old process after autoupdate. Otherwise we create a new process + // after each update + logger.Infof("PID of the new process is %d", pid) + return nil + } + } + select { + case <-ctx.Done(): + return ctx.Err() + case newConfigurable := <-a.updateConfigChan: + ticker.Stop() + a.configurable = newConfigurable + ticker = time.NewTicker(a.configurable.freq) + // Check if there is new version of cloudflared after receiving new AutoUpdaterConfigurable + case <-ticker.C: + } + } +} + +// Update is the method to pass new AutoUpdaterConfigurable to a running AutoUpdater. 
It is safe to be called concurrently +func (a *AutoUpdater) Update(newFreq time.Duration) { + newConfigurable := &configurable{ + enabled: true, + freq: newFreq, + } + // A ero duration means autoupdate is disabled + if newFreq == 0 { + newConfigurable.enabled = false + newConfigurable.freq = DefaultCheckUpdateFreq + } + a.updateConfigChan <- newConfigurable +} + func IsAutoupdateEnabled(c *cli.Context) bool { + if !SupportAutoUpdate() { + return false + } + return !c.Bool("no-autoupdate") && c.Duration("autoupdate-freq") != 0 +} + +func SupportAutoUpdate() bool { if runtime.GOOS == "windows" { logger.Info(noUpdateOnWindowsMessage) return false @@ -122,8 +181,7 @@ func IsAutoupdateEnabled(c *cli.Context) bool { logger.Info(noUpdateInShellMessage) return false } - - return !c.Bool("no-autoupdate") && c.Duration("autoupdate-freq") != 0 + return true } func isRunningFromTerminal() bool { diff --git a/cmd/cloudflared/updater/update_test.go b/cmd/cloudflared/updater/update_test.go new file mode 100644 index 00000000..218b22b4 --- /dev/null +++ b/cmd/cloudflared/updater/update_test.go @@ -0,0 +1,26 @@ +package updater + +import ( + "context" + "testing" + + "github.com/facebookgo/grace/gracenet" + "github.com/stretchr/testify/assert" +) + +func TestDisabledAutoUpdater(t *testing.T) { + listeners := &gracenet.Net{} + autoupdater := NewAutoUpdater(0, listeners) + ctx, cancel := context.WithCancel(context.Background()) + errC := make(chan error) + go func() { + errC <- autoupdater.Run(ctx) + }() + + assert.False(t, autoupdater.configurable.enabled) + assert.Equal(t, DefaultCheckUpdateFreq, autoupdater.configurable.freq) + + cancel() + // Make sure that autoupdater terminates after canceling the context + assert.Equal(t, context.Canceled, <-errC) +} diff --git a/connection/connection.go b/connection/connection.go index f9dd6125..984b7ba4 100644 --- a/connection/connection.go +++ b/connection/connection.go @@ -2,14 +2,14 @@ package connection import ( "context" - "crypto/tls" "net" - "sync" "time" "github.com/cloudflare/cloudflared/h2mux" "github.com/cloudflare/cloudflared/tunnelrpc" + "github.com/cloudflare/cloudflared/tunnelrpc/pogs" tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" + "github.com/google/uuid" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -17,7 +17,6 @@ import ( ) const ( - dialTimeout = 5 * time.Second openStreamTimeout = 30 * time.Second ) @@ -29,134 +28,54 @@ func (e dialError) Error() string { return e.cause.Error() } -type muxerShutdownError struct{} - -func (e muxerShutdownError) Error() string { - return "muxer shutdown" +type Connection struct { + id uuid.UUID + muxer *h2mux.Muxer } -type ConnectionConfig struct { - TLSConfig *tls.Config - HeartbeatInterval time.Duration - MaxHeartbeats uint64 - Logger *logrus.Entry -} - -type connectionHandler interface { - serve(ctx context.Context) error - connect(ctx context.Context, parameters *tunnelpogs.ConnectParameters) (*tunnelpogs.ConnectResult, error) - shutdown() -} - -type h2muxHandler struct { - muxer *h2mux.Muxer - logger *logrus.Entry -} - -type muxedStreamHandler struct { -} - -// Implements MuxedStreamHandler interface -func (h *muxedStreamHandler) ServeStream(stream *h2mux.MuxedStream) error { - return nil -} - -func (h *h2muxHandler) serve(ctx context.Context) error { - // Serve doesn't return until h2mux is shutdown - if err := h.muxer.Serve(ctx); err != nil { - return err +func newConnection(muxer *h2mux.Muxer, edgeIP *net.TCPAddr) (*Connection, error) { + id, err := uuid.NewRandom() + if err != 
nil { + return nil, err } - return muxerShutdownError{} + return &Connection{ + id: id, + muxer: muxer, + }, nil +} + +func (c *Connection) Serve(ctx context.Context) error { + // Serve doesn't return until h2mux is shutdown + return c.muxer.Serve(ctx) } // Connect is used to establish connections with cloudflare's edge network -func (h *h2muxHandler) connect(ctx context.Context, parameters *tunnelpogs.ConnectParameters) (*tunnelpogs.ConnectResult, error) { +func (c *Connection) Connect(ctx context.Context, parameters *tunnelpogs.ConnectParameters, logger *logrus.Entry) (*pogs.ConnectResult, error) { openStreamCtx, cancel := context.WithTimeout(ctx, openStreamTimeout) defer cancel() - conn, err := h.newRPConn(openStreamCtx) + + rpcConn, err := c.newRPConn(openStreamCtx, logger) if err != nil { - return nil, errors.Wrap(err, "Failed to create new RPC connection") + return nil, errors.Wrap(err, "cannot create new RPC connection") } - defer conn.Close() - tsClient := tunnelpogs.TunnelServer_PogsClient{Client: conn.Bootstrap(ctx)} + defer rpcConn.Close() + + tsClient := tunnelpogs.TunnelServer_PogsClient{Client: rpcConn.Bootstrap(ctx)} + return tsClient.Connect(ctx, parameters) } -func (h *h2muxHandler) shutdown() { - h.muxer.Shutdown() +func (c *Connection) Shutdown() { + c.muxer.Shutdown() } -func (h *h2muxHandler) newRPConn(ctx context.Context) (*rpc.Conn, error) { - stream, err := h.muxer.OpenStream(ctx, []h2mux.Header{ - {Name: ":method", Value: "RPC"}, - {Name: ":scheme", Value: "capnp"}, - {Name: ":path", Value: "*"}, - }, nil) +func (c *Connection) newRPConn(ctx context.Context, logger *logrus.Entry) (*rpc.Conn, error) { + stream, err := c.muxer.OpenRPCStream(ctx) if err != nil { return nil, err } return rpc.NewConn( - tunnelrpc.NewTransportLogger(h.logger.WithField("subsystem", "rpc-register"), rpc.StreamTransport(stream)), - tunnelrpc.ConnLog(h.logger.WithField("subsystem", "rpc-transport")), + tunnelrpc.NewTransportLogger(logger.WithField("rpc", "connect"), rpc.StreamTransport(stream)), + tunnelrpc.ConnLog(logger.WithField("rpc", "connect")), ), nil } - -// NewConnectionHandler returns a connectionHandler, wrapping h2mux to make RPC calls -func newH2MuxHandler(ctx context.Context, - config *ConnectionConfig, - edgeIP *net.TCPAddr, -) (connectionHandler, error) { - // Inherit from parent context so we can cancel (Ctrl-C) while dialing - dialCtx, dialCancel := context.WithTimeout(ctx, dialTimeout) - defer dialCancel() - dialer := net.Dialer{DualStack: true} - plaintextEdgeConn, err := dialer.DialContext(dialCtx, "tcp", edgeIP.String()) - if err != nil { - return nil, dialError{cause: errors.Wrap(err, "DialContext error")} - } - edgeConn := tls.Client(plaintextEdgeConn, config.TLSConfig) - edgeConn.SetDeadline(time.Now().Add(dialTimeout)) - err = edgeConn.Handshake() - if err != nil { - return nil, dialError{cause: errors.Wrap(err, "Handshake with edge error")} - } - // clear the deadline on the conn; h2mux has its own timeouts - edgeConn.SetDeadline(time.Time{}) - // Establish a muxed connection with the edge - // Client mux handshake with agent server - muxer, err := h2mux.Handshake(edgeConn, edgeConn, h2mux.MuxerConfig{ - Timeout: dialTimeout, - Handler: &muxedStreamHandler{}, - IsClient: true, - HeartbeatInterval: config.HeartbeatInterval, - MaxHeartbeats: config.MaxHeartbeats, - Logger: config.Logger, - }) - if err != nil { - return nil, err - } - return &h2muxHandler{ - muxer: muxer, - logger: config.Logger, - }, nil -} - -// connectionPool is a pool of connection handlers -type 
connectionPool struct { - sync.Mutex - connectionHandlers []connectionHandler -} - -func (cp *connectionPool) put(h connectionHandler) { - cp.Lock() - defer cp.Unlock() - cp.connectionHandlers = append(cp.connectionHandlers, h) -} - -func (cp *connectionPool) close() { - cp.Lock() - defer cp.Unlock() - for _, h := range cp.connectionHandlers { - h.shutdown() - } -} diff --git a/connection/discovery.go b/connection/discovery.go index 898b0755..7170f760 100644 --- a/connection/discovery.go +++ b/connection/discovery.go @@ -5,10 +5,11 @@ import ( "crypto/tls" "fmt" "net" + "sync" "time" "github.com/pkg/errors" - log "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus" ) const ( @@ -22,6 +23,9 @@ const ( dotServerName = "cloudflare-dns.com" dotServerAddr = "1.1.1.1:853" dotTimeout = time.Duration(15 * time.Second) + + // SRV record resolution TTL + resolveEdgeAddrTTL = 1 * time.Hour ) var friendlyDNSErrorLines = []string{ @@ -34,20 +38,65 @@ var friendlyDNSErrorLines = []string{ ` https://developers.cloudflare.com/1.1.1.1/setting-up-1.1.1.1/`, } -func ResolveEdgeIPs(logger *log.Logger, addresses []string) ([]*net.TCPAddr, error) { - if len(addresses) > 0 { - var tcpAddrs []*net.TCPAddr - for _, address := range addresses { - // Addresses specified (for testing, usually) - tcpAddr, err := net.ResolveTCPAddr("tcp", address) - if err != nil { - return nil, err - } - tcpAddrs = append(tcpAddrs, tcpAddr) - } - return tcpAddrs, nil +// EdgeServiceDiscoverer is an interface for looking up Cloudflare's edge network addresses +type EdgeServiceDiscoverer interface { + // Addr returns an address to connect to cloudflare's edge network + Addr() *net.TCPAddr + // AvailableAddrs returns the number of unique addresses + AvailableAddrs() uint8 + // Refresh rediscover Cloudflare's edge network addresses + Refresh() error +} + +// EdgeAddrResolver discovers the addresses of Cloudflare's edge network through SRV record. +// It implements EdgeServiceDiscoverer interface +type EdgeAddrResolver struct { + sync.Mutex + // Addrs to connect to cloudflare's edge network + addrs []*net.TCPAddr + // index of the next element to use in addrs + nextAddrIndex int + logger *logrus.Entry +} + +func NewEdgeAddrResolver(logger *logrus.Logger) (EdgeServiceDiscoverer, error) { + r := &EdgeAddrResolver{ + logger: logger.WithField("subsystem", " edgeAddrResolver"), } - // HA service discovery lookup + if err := r.Refresh(); err != nil { + return nil, err + } + return r, nil +} + +func (r *EdgeAddrResolver) Addr() *net.TCPAddr { + r.Lock() + defer r.Unlock() + addr := r.addrs[r.nextAddrIndex] + r.nextAddrIndex = (r.nextAddrIndex + 1) % len(r.addrs) + return addr +} + +func (r *EdgeAddrResolver) AvailableAddrs() uint8 { + r.Lock() + defer r.Unlock() + return uint8(len(r.addrs)) +} + +func (r *EdgeAddrResolver) Refresh() error { + newAddrs, err := EdgeDiscovery(r.logger) + if err != nil { + return err + } + r.Lock() + defer r.Unlock() + r.addrs = newAddrs + r.nextAddrIndex = 0 + return nil +} + +// HA service discovery lookup +func EdgeDiscovery(logger *logrus.Entry) ([]*net.TCPAddr, error) { _, addrs, err := net.LookupSRV(srvService, srvProto, srvName) if err != nil { // Try to fall back to DoT from Cloudflare directly. 
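
As an aside between these two hunks: the EdgeServiceDiscoverer interface introduced above is the abstraction the new EdgeManager consumes. A minimal sketch of how a caller might drive it follows; connectLoop, the dial callback, and the hard-coded hourly interval are assumptions for illustration only and are not part of this patch.

package sketch

import (
	"context"
	"net"
	"time"

	"github.com/cloudflare/cloudflared/connection"
	"github.com/sirupsen/logrus"
)

// connectLoop is an illustrative sketch of driving EdgeServiceDiscoverer: Addr() hands
// out resolved edge addresses round-robin, Refresh() re-resolves them, and the hourly
// ticker mirrors resolveEdgeAddrTTL. The dial callback stands in for real connect-and-serve logic.
func connectLoop(ctx context.Context, d connection.EdgeServiceDiscoverer, dial func(context.Context, *net.TCPAddr) error, logger *logrus.Logger) error {
	refresh := time.NewTicker(time.Hour)
	defer refresh.Stop()
	for {
		select {
		case <-ctx.Done():
			return ctx.Err()
		case <-refresh.C:
			// Keep the previously resolved addresses if re-resolution fails.
			if err := d.Refresh(); err != nil {
				logger.WithError(err).Warn("cannot refresh Cloudflare edge addresses")
			}
		default:
			addr := d.Addr()
			if err := dial(ctx, addr); err != nil {
				logger.WithError(err).Warnf("connection to %v failed", addr)
			}
		}
	}
}

In the actual change, EdgeManager.Run plays this role, interleaving Refresh on a ticker with the shouldCreateConnection/shouldReduceConnection checks shown later in connection/manager.go.
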
@@ -78,7 +127,7 @@ func ResolveEdgeIPs(logger *log.Logger, addresses []string) ([]*net.TCPAddr, err var resolvedIPsPerCNAME [][]*net.TCPAddr var lookupErr error for _, addr := range addrs { - ips, err := ResolveSRVToTCP(addr) + ips, err := resolveSRVToTCP(addr) if err != nil || len(ips) == 0 { // don't return early, we might be able to resolve other addresses lookupErr = err @@ -86,14 +135,14 @@ func ResolveEdgeIPs(logger *log.Logger, addresses []string) ([]*net.TCPAddr, err } resolvedIPsPerCNAME = append(resolvedIPsPerCNAME, ips) } - ips := FlattenServiceIPs(resolvedIPsPerCNAME) + ips := flattenServiceIPs(resolvedIPsPerCNAME) if lookupErr == nil && len(ips) == 0 { return nil, fmt.Errorf("Unknown service discovery error") } return ips, lookupErr } -func ResolveSRVToTCP(srv *net.SRV) ([]*net.TCPAddr, error) { +func resolveSRVToTCP(srv *net.SRV) ([]*net.TCPAddr, error) { ips, err := net.LookupIP(srv.Target) if err != nil { return nil, err @@ -107,7 +156,7 @@ func ResolveSRVToTCP(srv *net.SRV) ([]*net.TCPAddr, error) { // FlattenServiceIPs transposes and flattens the input slices such that the // first element of the n inner slices are the first n elements of the result. -func FlattenServiceIPs(ipsByService [][]*net.TCPAddr) []*net.TCPAddr { +func flattenServiceIPs(ipsByService [][]*net.TCPAddr) []*net.TCPAddr { var result []*net.TCPAddr for len(ipsByService) > 0 { filtered := ipsByService[:0] @@ -141,3 +190,65 @@ func fallbackResolver(serverName, serverAddress string) *net.Resolver { }, } } + +// EdgeHostnameResolver discovers the addresses of Cloudflare's edge network via a list of server hostnames. +// It implements EdgeServiceDiscoverer interface, and is used mainly for testing connectivity. +type EdgeHostnameResolver struct { + sync.Mutex + // hostnames of edge servers + hostnames []string + // Addrs to connect to cloudflare's edge network + addrs []*net.TCPAddr + // index of the next element to use in addrs + nextAddrIndex int +} + +func NewEdgeHostnameResolver(edgeHostnames []string) (EdgeServiceDiscoverer, error) { + r := &EdgeHostnameResolver{ + hostnames: edgeHostnames, + } + if err := r.Refresh(); err != nil { + return nil, err + } + return r, nil +} + +func (r *EdgeHostnameResolver) Addr() *net.TCPAddr { + r.Lock() + defer r.Unlock() + addr := r.addrs[r.nextAddrIndex] + r.nextAddrIndex = (r.nextAddrIndex + 1) % len(r.addrs) + return addr +} + +func (r *EdgeHostnameResolver) AvailableAddrs() uint8 { + r.Lock() + defer r.Unlock() + return uint8(len(r.addrs)) +} + +func (r *EdgeHostnameResolver) Refresh() error { + newAddrs, err := ResolveAddrs(r.hostnames) + if err != nil { + return err + } + r.Lock() + defer r.Unlock() + r.addrs = newAddrs + r.nextAddrIndex = 0 + return nil +} + +// Resolve TCP address given a list of addresses. 
Address can be a hostname, however, it will return at most one +// of the hostname's IP addresses +func ResolveAddrs(addrs []string) ([]*net.TCPAddr, error) { + var tcpAddrs []*net.TCPAddr + for _, addr := range addrs { + tcpAddr, err := net.ResolveTCPAddr("tcp", addr) + if err != nil { + return nil, err + } + tcpAddrs = append(tcpAddrs, tcpAddr) + } + return tcpAddrs, nil +} diff --git a/connection/discovery_test.go b/connection/discovery_test.go index 4e5aeacf..806df8bb 100644 --- a/connection/discovery_test.go +++ b/connection/discovery_test.go @@ -7,8 +7,26 @@ import ( "github.com/stretchr/testify/assert" ) +type mockEdgeServiceDiscoverer struct { +} + +func (mr *mockEdgeServiceDiscoverer) Addr() *net.TCPAddr { + return &net.TCPAddr{ + IP: net.ParseIP("127.0.0.1"), + Port: 63102, + } +} + +func (mr *mockEdgeServiceDiscoverer) AvailableAddrs() uint8 { + return 1 +} + +func (mr *mockEdgeServiceDiscoverer) Refresh() error { + return nil +} + func TestFlattenServiceIPs(t *testing.T) { - result := FlattenServiceIPs([][]*net.TCPAddr{ + result := flattenServiceIPs([][]*net.TCPAddr{ []*net.TCPAddr{ &net.TCPAddr{Port: 1}, &net.TCPAddr{Port: 2}, diff --git a/connection/manager.go b/connection/manager.go new file mode 100644 index 00000000..e266ba0f --- /dev/null +++ b/connection/manager.go @@ -0,0 +1,281 @@ +package connection + +import ( + "context" + "crypto/tls" + "fmt" + "net" + "sync" + "time" + + "github.com/cloudflare/cloudflared/cmd/cloudflared/buildinfo" + "github.com/cloudflare/cloudflared/h2mux" + "github.com/cloudflare/cloudflared/tunnelrpc/pogs" + "github.com/google/uuid" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" +) + +const ( + quickStartLink = "https://developers.cloudflare.com/argo-tunnel/quickstart/" + faqLink = "https://developers.cloudflare.com/argo-tunnel/faq/" +) + +// EdgeManager manages connections with the edge +type EdgeManager struct { + // streamHandler handles stream opened by the edge + streamHandler h2mux.MuxedStreamHandler + // TLSConfig is the TLS configuration to connect with edge + tlsConfig *tls.Config + // cloudflaredConfig is the cloudflared configuration that is determined when the process first starts + cloudflaredConfig *CloudflaredConfig + // serviceDiscoverer returns the next edge addr to connect to + serviceDiscoverer EdgeServiceDiscoverer + // state is attributes of ConnectionManager that can change during runtime. 
+ state *edgeManagerState + + logger *logrus.Entry +} + +// EdgeConnectionManagerConfigurable is the configurable attributes of a EdgeConnectionManager +type EdgeManagerConfigurable struct { + TunnelHostnames []h2mux.TunnelHostname + *pogs.EdgeConnectionConfig +} + +type CloudflaredConfig struct { + CloudflaredID uuid.UUID + Tags []pogs.Tag + BuildInfo *buildinfo.BuildInfo +} + +func NewEdgeManager( + streamHandler h2mux.MuxedStreamHandler, + edgeConnMgrConfigurable *EdgeManagerConfigurable, + userCredential []byte, + tlsConfig *tls.Config, + serviceDiscoverer EdgeServiceDiscoverer, + cloudflaredConfig *CloudflaredConfig, + logger *logrus.Logger, +) *EdgeManager { + return &EdgeManager{ + streamHandler: streamHandler, + tlsConfig: tlsConfig, + cloudflaredConfig: cloudflaredConfig, + serviceDiscoverer: serviceDiscoverer, + state: newEdgeConnectionManagerState(edgeConnMgrConfigurable, userCredential), + logger: logger.WithField("subsystem", "connectionManager"), + } +} + +func (em *EdgeManager) Run(ctx context.Context) error { + defer em.shutdown() + + resolveEdgeIPTicker := time.Tick(resolveEdgeAddrTTL) + for { + select { + case <-ctx.Done(): + return errors.Wrap(ctx.Err(), "EdgeConnectionManager terminated") + case <-resolveEdgeIPTicker: + if err := em.serviceDiscoverer.Refresh(); err != nil { + em.logger.WithError(err).Warn("Cannot refresh Cloudflare edge addresses") + } + default: + time.Sleep(1 * time.Second) + } + // Create/delete connection one at a time, so we don't need to adjust for connections that are being created/deleted + // in shouldCreateConnection or shouldReduceConnection calculation + if em.state.shouldCreateConnection(em.serviceDiscoverer.AvailableAddrs()) { + if err := em.newConnection(ctx); err != nil { + em.logger.WithError(err).Error("cannot create new connection") + } + } else if em.state.shouldReduceConnection() { + if err := em.closeConnection(ctx); err != nil { + em.logger.WithError(err).Error("cannot close connection") + } + } + } +} + +func (em *EdgeManager) UpdateConfigurable(newConfigurable *EdgeManagerConfigurable) { + em.logger.Infof("New edge connection manager configuration %+v", newConfigurable) + em.state.updateConfigurable(newConfigurable) +} + +func (em *EdgeManager) newConnection(ctx context.Context) error { + edgeIP := em.serviceDiscoverer.Addr() + edgeConn, err := em.dialEdge(ctx, edgeIP) + if err != nil { + return errors.Wrap(err, "dial edge error") + } + configurable := em.state.getConfigurable() + // Establish a muxed connection with the edge + // Client mux handshake with agent server + muxer, err := h2mux.Handshake(edgeConn, edgeConn, h2mux.MuxerConfig{ + Timeout: configurable.Timeout, + Handler: em.streamHandler, + IsClient: true, + HeartbeatInterval: configurable.HeartbeatInterval, + MaxHeartbeats: configurable.MaxFailedHeartbeats, + Logger: em.logger.WithField("subsystem", "muxer"), + }) + if err != nil { + return errors.Wrap(err, "handshake with edge error") + } + + h2muxConn, err := newConnection(muxer, edgeIP) + if err != nil { + return errors.Wrap(err, "create h2mux connection error") + } + + go em.serveConn(ctx, h2muxConn) + + connResult, err := h2muxConn.Connect(ctx, &pogs.ConnectParameters{ + OriginCert: em.state.getUserCredential(), + CloudflaredID: em.cloudflaredConfig.CloudflaredID, + NumPreviousAttempts: 0, + CloudflaredVersion: em.cloudflaredConfig.BuildInfo.CloudflaredVersion, + }, em.logger) + if err != nil { + h2muxConn.Shutdown() + return errors.Wrap(err, "connect with edge error") + } + + if connErr := connResult.Err; 
connErr != nil { + if !connErr.ShouldRetry { + return errors.Wrap(connErr, em.noRetryMessage()) + } + return errors.Wrapf(connErr, "server respond with retry at %v", connErr.RetryAfter) + } + + em.state.newConnection(h2muxConn) + em.logger.Infof("connected to %s", connResult.ServerInfo.LocationName) + return nil +} + +func (em *EdgeManager) closeConnection(ctx context.Context) error { + conn := em.state.getFirstConnection() + if conn == nil { + return fmt.Errorf("no connection to close") + } + conn.Shutdown() + return nil +} + +func (em *EdgeManager) serveConn(ctx context.Context, conn *Connection) { + err := conn.Serve(ctx) + em.logger.WithError(err).Warn("Connection closed") + em.state.closeConnection(conn) +} + +func (em *EdgeManager) dialEdge(ctx context.Context, edgeIP *net.TCPAddr) (*tls.Conn, error) { + timeout := em.state.getConfigurable().Timeout + // Inherit from parent context so we can cancel (Ctrl-C) while dialing + dialCtx, dialCancel := context.WithTimeout(ctx, timeout) + defer dialCancel() + + dialer := net.Dialer{DualStack: true} + edgeConn, err := dialer.DialContext(dialCtx, "tcp", edgeIP.String()) + if err != nil { + return nil, dialError{cause: errors.Wrap(err, "DialContext error")} + } + tlsEdgeConn := tls.Client(edgeConn, em.tlsConfig) + tlsEdgeConn.SetDeadline(time.Now().Add(timeout)) + + if err = tlsEdgeConn.Handshake(); err != nil { + return nil, dialError{cause: errors.Wrap(err, "Handshake with edge error")} + } + // clear the deadline on the conn; h2mux has its own timeouts + tlsEdgeConn.SetDeadline(time.Time{}) + return tlsEdgeConn, nil +} + +func (em *EdgeManager) noRetryMessage() string { + messageTemplate := "cloudflared could not register an Argo Tunnel on your account. Please confirm the following before trying again:" + + "1. You have Argo Smart Routing enabled in your account, See Enable Argo section of %s." + + "2. Your credential at %s is still valid. See %s." 
+ return fmt.Sprintf(messageTemplate, quickStartLink, em.state.getConfigurable().UserCredentialPath, faqLink) +} + +func (em *EdgeManager) shutdown() { + em.state.shutdown() +} + +type edgeManagerState struct { + sync.RWMutex + configurable *EdgeManagerConfigurable + userCredential []byte + conns map[uuid.UUID]*Connection +} + +func newEdgeConnectionManagerState(configurable *EdgeManagerConfigurable, userCredential []byte) *edgeManagerState { + return &edgeManagerState{ + configurable: configurable, + userCredential: userCredential, + conns: make(map[uuid.UUID]*Connection), + } +} + +func (ems *edgeManagerState) shouldCreateConnection(availableEdgeAddrs uint8) bool { + ems.RLock() + defer ems.RUnlock() + expectedHAConns := ems.configurable.NumHAConnections + if availableEdgeAddrs < expectedHAConns { + expectedHAConns = availableEdgeAddrs + } + return uint8(len(ems.conns)) < expectedHAConns +} + +func (ems *edgeManagerState) shouldReduceConnection() bool { + ems.RLock() + defer ems.RUnlock() + return uint8(len(ems.conns)) > ems.configurable.NumHAConnections +} + +func (ems *edgeManagerState) newConnection(conn *Connection) { + ems.Lock() + defer ems.Unlock() + ems.conns[conn.id] = conn +} + +func (ems *edgeManagerState) closeConnection(conn *Connection) { + ems.Lock() + defer ems.Unlock() + delete(ems.conns, conn.id) +} + +func (ems *edgeManagerState) getFirstConnection() *Connection { + ems.RLock() + defer ems.RUnlock() + + for _, conn := range ems.conns { + return conn + } + return nil +} + +func (ems *edgeManagerState) shutdown() { + ems.Lock() + defer ems.Unlock() + for _, conn := range ems.conns { + conn.Shutdown() + } +} + +func (ems *edgeManagerState) getConfigurable() *EdgeManagerConfigurable { + ems.Lock() + defer ems.Unlock() + return ems.configurable +} + +func (ems *edgeManagerState) updateConfigurable(newConfigurable *EdgeManagerConfigurable) { + ems.Lock() + defer ems.Unlock() + ems.configurable = newConfigurable +} + +func (ems *edgeManagerState) getUserCredential() []byte { + ems.RLock() + defer ems.RUnlock() + return ems.userCredential +} diff --git a/connection/manager_test.go b/connection/manager_test.go new file mode 100644 index 00000000..7565567f --- /dev/null +++ b/connection/manager_test.go @@ -0,0 +1,77 @@ +package connection + +import ( + "testing" + "time" + + "github.com/cloudflare/cloudflared/cmd/cloudflared/buildinfo" + "github.com/stretchr/testify/assert" + + "github.com/cloudflare/cloudflared/h2mux" + "github.com/cloudflare/cloudflared/tunnelrpc/pogs" + "github.com/google/uuid" + "github.com/sirupsen/logrus" +) + +var ( + configurable = &EdgeManagerConfigurable{ + []h2mux.TunnelHostname{ + "http.example.com", + "ws.example.com", + "hello.example.com", + }, + &pogs.EdgeConnectionConfig{ + NumHAConnections: 1, + HeartbeatInterval: 1 * time.Second, + Timeout: 5 * time.Second, + MaxFailedHeartbeats: 3, + UserCredentialPath: "/etc/cloudflared/cert.pem", + }, + } + cloudflaredConfig = &CloudflaredConfig{ + CloudflaredID: uuid.New(), + Tags: []pogs.Tag{ + {Name: "pool", Value: "east-6"}, + }, + BuildInfo: &buildinfo.BuildInfo{ + GoOS: "linux", + GoVersion: "1.12", + GoArch: "amd64", + CloudflaredVersion: "2019.6.0", + }, + } +) + +type mockStreamHandler struct { +} + +func (msh *mockStreamHandler) ServeStream(*h2mux.MuxedStream) error { + return nil +} + +func mockEdgeManager() *EdgeManager { + return NewEdgeManager( + &mockStreamHandler{}, + configurable, + []byte{}, + nil, + &mockEdgeServiceDiscoverer{}, + cloudflaredConfig, + logrus.New(), + ) +} + +func 
TestUpdateConfigurable(t *testing.T) { + m := mockEdgeManager() + newConfigurable := &EdgeManagerConfigurable{ + []h2mux.TunnelHostname{ + "second.example.com", + }, + &pogs.EdgeConnectionConfig{ + NumHAConnections: 2, + }, + } + m.UpdateConfigurable(newConfigurable) + + assert.Equal(t, newConfigurable, m.state.getConfigurable()) +} diff --git a/connection/supervisor.go b/connection/supervisor.go deleted file mode 100644 index 50855d3f..00000000 --- a/connection/supervisor.go +++ /dev/null @@ -1,147 +0,0 @@ -package connection - -import ( - "context" - "net" - "time" - - tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" - "github.com/google/uuid" - "github.com/pkg/errors" - "github.com/sirupsen/logrus" -) - -const ( - // Waiting time before retrying a failed tunnel connection - reconnectDuration = time.Second * 10 - // SRV record resolution TTL - resolveTTL = time.Hour - // Interval between establishing new connection - connectionInterval = time.Second -) - -type CloudflaredConfig struct { - ConnectionConfig *ConnectionConfig - OriginCert []byte - Tags []tunnelpogs.Tag - EdgeAddrs []string - HAConnections uint - Logger *logrus.Logger - CloudflaredVersion string -} - -// Supervisor is a stateful object that manages connections with the edge -type Supervisor struct { - config *CloudflaredConfig - state *supervisorState - connErrors chan error -} - -type supervisorState struct { - // IPs to connect to cloudflare's edge network - edgeIPs []*net.TCPAddr - // index of the next element to use in edgeIPs - nextEdgeIPIndex int - // last time edgeIPs were refreshed - lastResolveTime time.Time - // ID of this cloudflared instance - cloudflaredID uuid.UUID - // connectionPool is a pool of connectionHandlers that can be used to make RPCs - connectionPool *connectionPool -} - -func (s *supervisorState) getNextEdgeIP() *net.TCPAddr { - ip := s.edgeIPs[s.nextEdgeIPIndex%len(s.edgeIPs)] - s.nextEdgeIPIndex++ - return ip -} - -func NewSupervisor(config *CloudflaredConfig) *Supervisor { - return &Supervisor{ - config: config, - state: &supervisorState{ - connectionPool: &connectionPool{}, - }, - connErrors: make(chan error), - } -} - -func (s *Supervisor) Run(ctx context.Context) error { - logger := s.config.Logger - if err := s.initialize(); err != nil { - logger.WithError(err).Error("Failed to get edge IPs") - return err - } - defer s.state.connectionPool.close() - - var currentConnectionCount uint - expectedConnectionCount := s.config.HAConnections - if uint(len(s.state.edgeIPs)) < s.config.HAConnections { - logger.Warnf("You requested %d HA connections but I can give you at most %d.", s.config.HAConnections, len(s.state.edgeIPs)) - expectedConnectionCount = uint(len(s.state.edgeIPs)) - } - for { - select { - case <-ctx.Done(): - return nil - case connErr := <-s.connErrors: - logger.WithError(connErr).Warnf("Connection dropped unexpectedly") - currentConnectionCount-- - default: - time.Sleep(5 * time.Second) - } - if currentConnectionCount < expectedConnectionCount { - h, err := newH2MuxHandler(ctx, s.config.ConnectionConfig, s.state.getNextEdgeIP()) - if err != nil { - logger.WithError(err).Error("Failed to create new connection handler") - continue - } - go func() { - s.connErrors <- h.serve(ctx) - }() - connResult, err := s.connect(ctx, s.config, s.state.cloudflaredID, h) - if err != nil { - logger.WithError(err).Errorf("Failed to connect to cloudflared's edge network") - h.shutdown() - continue - } - if connErr := connResult.Err; connErr != nil && !connErr.ShouldRetry { - 
logger.WithError(connErr).Errorf("Server respond with don't retry to connect") - h.shutdown() - return err - } - logger.Infof("Connected to %s", connResult.ServerInfo.LocationName) - s.state.connectionPool.put(h) - currentConnectionCount++ - } - } -} - -func (s *Supervisor) initialize() error { - edgeIPs, err := ResolveEdgeIPs(s.config.Logger, s.config.EdgeAddrs) - if err != nil { - return errors.Wrapf(err, "Failed to resolve cloudflare edge network address") - } - s.state.edgeIPs = edgeIPs - s.state.lastResolveTime = time.Now() - cloudflaredID, err := uuid.NewRandom() - if err != nil { - return errors.Wrap(err, "Failed to generate cloudflared ID") - } - s.state.cloudflaredID = cloudflaredID - return nil -} - -func (s *Supervisor) connect(ctx context.Context, - config *CloudflaredConfig, - cloudflaredID uuid.UUID, - h connectionHandler, -) (*tunnelpogs.ConnectResult, error) { - connectParameters := &tunnelpogs.ConnectParameters{ - OriginCert: config.OriginCert, - CloudflaredID: cloudflaredID, - NumPreviousAttempts: 0, - CloudflaredVersion: config.CloudflaredVersion, - } - return h.connect(ctx, connectParameters) -} diff --git a/h2mux/h2mux.go b/h2mux/h2mux.go index 2ac07b62..6e0905c2 100644 --- a/h2mux/h2mux.go +++ b/h2mux/h2mux.go @@ -94,6 +94,14 @@ type Header struct { Name, Value string } +func RPCHeaders() []Header { + return []Header{ + {Name: ":method", Value: "RPC"}, + {Name: ":scheme", Value: "capnp"}, + {Name: ":path", Value: "*"}, + } +} + // Handshake establishes a muxed connection with the peer. // After the handshake completes, it is possible to open and accept streams. func Handshake( @@ -414,6 +422,41 @@ func (m *Muxer) OpenStream(ctx context.Context, headers []Header, body io.Reader } } +func (m *Muxer) OpenRPCStream(ctx context.Context) (*MuxedStream, error) { + stream := &MuxedStream{ + responseHeadersReceived: make(chan struct{}), + readBuffer: NewSharedBuffer(), + writeBuffer: &bytes.Buffer{}, + writeBufferMaxLen: m.config.StreamWriteBufferMaxLen, + writeBufferHasSpace: make(chan struct{}, 1), + receiveWindow: m.config.DefaultWindowSize, + receiveWindowCurrentMax: m.config.DefaultWindowSize, + receiveWindowMax: m.config.MaxWindowSize, + sendWindow: m.config.DefaultWindowSize, + readyList: m.readyList, + writeHeaders: RPCHeaders(), + dictionaries: m.muxReader.dictionaries, + } + + select { + // Will be received by mux writer + case <-ctx.Done(): + return nil, ErrOpenStreamTimeout + case <-m.abortChan: + return nil, ErrConnectionClosed + case m.newStreamChan <- MuxedStreamRequest{stream: stream, body: nil}: + } + + select { + case <-ctx.Done(): + return nil, ErrResponseHeadersTimeout + case <-m.abortChan: + return nil, ErrConnectionClosed + case <-stream.responseHeadersReceived: + return stream, nil + } +} + func (m *Muxer) Metrics() *MuxerMetrics { return m.muxMetricsUpdater.metrics() } diff --git a/h2mux/muxedstream.go b/h2mux/muxedstream.go index 2bb59db1..44d6f1e2 100644 --- a/h2mux/muxedstream.go +++ b/h2mux/muxedstream.go @@ -68,7 +68,8 @@ type MuxedStream struct { sentEOF bool // true if the peer sent us an EOF receivedEOF bool - + // If valid, tunnelHostname is used to identify which origin service is the intended recipient of the request + tunnelHostname TunnelHostname // Compression-related fields receivedUseDict bool method string @@ -77,6 +78,16 @@ type MuxedStream struct { dictionaries h2Dictionaries } +type TunnelHostname string + +func (th TunnelHostname) String() string { + return string(th) +} + +func (th TunnelHostname) IsSet() bool { + return th != "" 
+} + func (s *MuxedStream) Read(p []byte) (n int, err error) { var readBuffer ReadWriteClosedCloser if s.dictionaries.read != nil { @@ -185,6 +196,25 @@ func (s *MuxedStream) WriteHeaders(headers []Header) error { return nil } +// IsRPCStream returns if the stream is used to transport RPC. +func (s *MuxedStream) IsRPCStream() bool { + rpcHeaders := RPCHeaders() + if len(s.Headers) != len(rpcHeaders) { + return false + } + // The headers order matters, so RPC stream should be opened with OpenRPCStream method and let MuxWriter serializes the headers. + for i, rpcHeader := range rpcHeaders { + if s.Headers[i] != rpcHeader { + return false + } + } + return true +} + +func (s *MuxedStream) TunnelHostname() TunnelHostname { + return s.tunnelHostname +} + func (s *MuxedStream) getReceiveWindow() uint32 { s.writeLock.Lock() defer s.writeLock.Unlock() diff --git a/h2mux/muxedstream_test.go b/h2mux/muxedstream_test.go index 3672b531..b0e0ac13 100644 --- a/h2mux/muxedstream_test.go +++ b/h2mux/muxedstream_test.go @@ -98,3 +98,30 @@ func TestMuxedStreamEOF(t *testing.T) { assert.Equal(t, 0, n) } } + +func TestIsRPCStream(t *testing.T) { + tests := []struct { + stream *MuxedStream + isRPCStream bool + }{ + { + stream: &MuxedStream{}, + isRPCStream: false, + }, + { + stream: &MuxedStream{Headers: RPCHeaders()}, + isRPCStream: true, + }, + { + stream: &MuxedStream{Headers: []Header{ + {Name: ":method", Value: "rpc"}, + {Name: ":scheme", Value: "Capnp"}, + {Name: ":path", Value: "/"}, + }}, + isRPCStream: false, + }, + } + for _, test := range tests { + assert.Equal(t, test.isRPCStream, test.stream.IsRPCStream()) + } +} diff --git a/h2mux/muxreader.go b/h2mux/muxreader.go index 3bdc8216..d97fcd8c 100644 --- a/h2mux/muxreader.go +++ b/h2mux/muxreader.go @@ -11,6 +11,10 @@ import ( "golang.org/x/net/http2" ) +const ( + CloudflaredProxyTunnelHostnameHeader = "cf-cloudflared-proxy-tunnel-hostname" +) + type MuxReader struct { // f is used to read HTTP2 frames. 
f *http2.Framer @@ -235,6 +239,8 @@ func (r *MuxReader) receiveHeaderData(frame *http2.MetaHeadersFrame) error { if r.dictionaries.write != nil { continue } + case CloudflaredProxyTunnelHostnameHeader: + stream.tunnelHostname = TunnelHostname(header.Value) } headers = append(headers, Header{Name: header.Name, Value: header.Value}) } diff --git a/h2mux/muxreader_test.go b/h2mux/muxreader_test.go new file mode 100644 index 00000000..dd3bf440 --- /dev/null +++ b/h2mux/muxreader_test.go @@ -0,0 +1,107 @@ +package h2mux + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) + +var ( + methodHeader = Header{ + Name: ":method", + Value: "GET", + } + schemeHeader = Header{ + Name: ":scheme", + Value: "https", + } + pathHeader = Header{ + Name: ":path", + Value: "/api/tunnels", + } + tunnelHostnameHeader = Header{ + Name: CloudflaredProxyTunnelHostnameHeader, + Value: "tunnel.example.com", + } + respStatusHeader = Header{ + Name: ":status", + Value: "200", + } +) + +type mockOriginStreamHandler struct { + stream *MuxedStream +} + +func (mosh *mockOriginStreamHandler) ServeStream(stream *MuxedStream) error { + mosh.stream = stream + // Echo tunnel hostname in header + stream.WriteHeaders([]Header{respStatusHeader}) + return nil +} + +func getCloudflaredProxyTunnelHostnameHeader(stream *MuxedStream) string { + for _, header := range stream.Headers { + if header.Name == CloudflaredProxyTunnelHostnameHeader { + return header.Value + } + } + return "" +} + +func assertOpenStreamSucceed(t *testing.T, stream *MuxedStream, err error) { + assert.NoError(t, err) + assert.Len(t, stream.Headers, 1) + assert.Equal(t, respStatusHeader, stream.Headers[0]) +} + +func TestMissingHeaders(t *testing.T) { + originHandler := &mockOriginStreamHandler{} + muxPair := NewDefaultMuxerPair(t, originHandler.ServeStream) + muxPair.Serve(t) + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + reqHeaders := []Header{ + { + Name: "content-type", + Value: "application/json", + }, + } + + // Request doesn't contain CloudflaredProxyTunnelHostnameHeader + stream, err := muxPair.EdgeMux.OpenStream(ctx, reqHeaders, nil) + assertOpenStreamSucceed(t, stream, err) + + assert.Empty(t, originHandler.stream.method) + assert.Empty(t, originHandler.stream.path) + assert.False(t, originHandler.stream.TunnelHostname().IsSet()) +} + +func TestReceiveHeaderData(t *testing.T) { + originHandler := &mockOriginStreamHandler{} + muxPair := NewDefaultMuxerPair(t, originHandler.ServeStream) + muxPair.Serve(t) + + reqHeaders := []Header{ + methodHeader, + schemeHeader, + pathHeader, + tunnelHostnameHeader, + } + + ctx, cancel := context.WithTimeout(context.Background(), time.Second) + defer cancel() + + reqHeaders = append(reqHeaders, tunnelHostnameHeader) + stream, err := muxPair.EdgeMux.OpenStream(ctx, reqHeaders, nil) + assertOpenStreamSucceed(t, stream, err) + + assert.Equal(t, methodHeader.Value, originHandler.stream.method) + assert.Equal(t, pathHeader.Value, originHandler.stream.path) + assert.True(t, originHandler.stream.TunnelHostname().IsSet()) + assert.Equal(t, tunnelHostnameHeader.Value, originHandler.stream.TunnelHostname().String()) +} diff --git a/origin/build_info.go b/origin/build_info.go deleted file mode 100644 index 72f0965a..00000000 --- a/origin/build_info.go +++ /dev/null @@ -1,19 +0,0 @@ -package origin - -import ( - "runtime" -) - -type BuildInfo struct { - GoOS string `json:"go_os"` - GoVersion string `json:"go_version"` - GoArch string `json:"go_arch"` 
-} - -func GetBuildInfo() *BuildInfo { - return &BuildInfo{ - GoOS: runtime.GOOS, - GoVersion: runtime.Version(), - GoArch: runtime.GOARCH, - } -} diff --git a/origin/supervisor.go b/origin/supervisor.go index 9f0c352d..ff7d96f9 100644 --- a/origin/supervisor.go +++ b/origin/supervisor.go @@ -6,6 +6,8 @@ import ( "net" "time" + "github.com/sirupsen/logrus" + "github.com/cloudflare/cloudflared/connection" "github.com/cloudflare/cloudflared/signal" @@ -34,6 +36,8 @@ type Supervisor struct { // currently-connecting tunnels to finish connecting so we can reset backoff timer nextConnectedIndex int nextConnectedSignal chan struct{} + + logger *logrus.Entry } type resolveResult struct { @@ -51,6 +55,7 @@ func NewSupervisor(config *TunnelConfig) *Supervisor { config: config, tunnelErrors: make(chan tunnelError), tunnelsConnecting: map[int]chan struct{}{}, + logger: config.Logger.WithField("subsystem", "supervisor"), } } @@ -124,8 +129,10 @@ func (s *Supervisor) Run(ctx context.Context, connectedSignal *signal.Signal, u } func (s *Supervisor) initialize(ctx context.Context, connectedSignal *signal.Signal, u uuid.UUID) error { - logger := s.config.Logger - edgeIPs, err := connection.ResolveEdgeIPs(logger, s.config.EdgeAddrs) + logger := s.logger + + edgeIPs, err := s.resolveEdgeIPs() + if err != nil { logger.Infof("ResolveEdgeIPs err") return err @@ -215,6 +222,15 @@ func (s *Supervisor) getEdgeIP(index int) *net.TCPAddr { return s.edgeIPs[index%len(s.edgeIPs)] } +func (s *Supervisor) resolveEdgeIPs() ([]*net.TCPAddr, error) { + // If --edge is specfied, resolve edge server addresses + if len(s.config.EdgeAddrs) > 0 { + return connection.ResolveAddrs(s.config.EdgeAddrs) + } + // Otherwise lookup edge server addresses through service discovery + return connection.EdgeDiscovery(s.logger) +} + func (s *Supervisor) refreshEdgeIPs() { if s.resolverC != nil { return @@ -224,7 +240,7 @@ func (s *Supervisor) refreshEdgeIPs() { } s.resolverC = make(chan resolveResult) go func() { - edgeIPs, err := connection.ResolveEdgeIPs(s.config.Logger, s.config.EdgeAddrs) + edgeIPs, err := s.resolveEdgeIPs() s.resolverC <- resolveResult{edgeIPs: edgeIPs, err: err} }() } diff --git a/origin/tunnel.go b/origin/tunnel.go index b3dc0ff8..b86f2ab8 100644 --- a/origin/tunnel.go +++ b/origin/tunnel.go @@ -14,9 +14,10 @@ import ( "sync" "time" - "github.com/cloudflare/cloudflared/connection" + "github.com/cloudflare/cloudflared/cmd/cloudflared/buildinfo" "github.com/cloudflare/cloudflared/h2mux" "github.com/cloudflare/cloudflared/signal" + "github.com/cloudflare/cloudflared/streamhandler" "github.com/cloudflare/cloudflared/tunnelrpc" tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs" "github.com/cloudflare/cloudflared/validation" @@ -41,7 +42,7 @@ const ( ) type TunnelConfig struct { - BuildInfo *BuildInfo + BuildInfo *buildinfo.BuildInfo ClientID string ClientTlsConfig *tls.Config CloseConnOnce *sync.Once // Used to close connectedSignal no more than once @@ -140,44 +141,8 @@ func (c *TunnelConfig) RegistrationOptions(connectionID uint8, OriginLocalIP str } } -func StartTunnelDaemon(config *TunnelConfig, shutdownC <-chan struct{}, connectedSignal *signal.Signal) error { - ctx, cancel := context.WithCancel(context.Background()) - go func() { - <-shutdownC - cancel() - }() - - u, err := uuid.NewRandom() - if err != nil { - return err - } - - // If a user specified negative HAConnections, we will treat it as requesting 1 connection - if config.HAConnections > 1 { - if config.UseDeclarativeTunnel { - return 
connection.NewSupervisor(&connection.CloudflaredConfig{ - ConnectionConfig: &connection.ConnectionConfig{ - TLSConfig: config.TlsConfig, - HeartbeatInterval: config.HeartbeatInterval, - MaxHeartbeats: config.MaxHeartbeats, - Logger: config.Logger.WithField("subsystem", "connection_supervisor"), - }, - OriginCert: config.OriginCert, - Tags: config.Tags, - EdgeAddrs: config.EdgeAddrs, - HAConnections: uint(config.HAConnections), - Logger: config.Logger, - CloudflaredVersion: config.ReportedVersion, - }).Run(ctx) - } - return NewSupervisor(config).Run(ctx, connectedSignal, u) - } else { - addrs, err := connection.ResolveEdgeIPs(config.Logger, config.EdgeAddrs) - if err != nil { - return err - } - return ServeTunnelLoop(ctx, config, addrs[0], 0, connectedSignal, u) - } +func StartTunnelDaemon(ctx context.Context, config *TunnelConfig, connectedSignal *signal.Signal, cloudflaredID uuid.UUID) error { + return NewSupervisor(config).Run(ctx, connectedSignal, cloudflaredID) } func ServeTunnelLoop(ctx context.Context, @@ -472,39 +437,6 @@ func LogServerInfo( metrics.registerServerLocation(uint8ToString(connectionID), serverInfo.LocationName) } -func H2RequestHeadersToH1Request(h2 []h2mux.Header, h1 *http.Request) error { - for _, header := range h2 { - switch header.Name { - case ":method": - h1.Method = header.Value - case ":scheme": - case ":authority": - // Otherwise the host header will be based on the origin URL - h1.Host = header.Value - case ":path": - u, err := url.Parse(header.Value) - if err != nil { - return fmt.Errorf("unparseable path") - } - resolved := h1.URL.ResolveReference(u) - // prevent escaping base URL - if !strings.HasPrefix(resolved.String(), h1.URL.String()) { - return fmt.Errorf("invalid path") - } - h1.URL = resolved - case "content-length": - contentLength, err := strconv.ParseInt(header.Value, 10, 64) - if err != nil { - return fmt.Errorf("unparseable content length") - } - h1.ContentLength = contentLength - default: - h1.Header.Add(http.CanonicalHeaderKey(header.Name), header.Value) - } - } - return nil -} - func H1ResponseToH2Response(h1 *http.Response) (h2 []h2mux.Header) { h2 = []h2mux.Header{{Name: ":status", Value: fmt.Sprintf("%d", h1.StatusCode)}} for headerName, headerValues := range h1.Header { @@ -515,10 +447,6 @@ func H1ResponseToH2Response(h1 *http.Response) (h2 []h2mux.Header) { return } -func FindCfRayHeader(h1 *http.Request) string { - return h1.Header.Get("Cf-Ray") -} - type TunnelHandler struct { originUrl string httpHostHeader string @@ -608,8 +536,8 @@ func (h *TunnelHandler) ServeStream(stream *h2mux.MuxedStream) error { return reqErr } - cfRay := FindCfRayHeader(req) - lbProbe := isLBProbeRequest(req) + cfRay := streamhandler.FindCfRayHeader(req) + lbProbe := streamhandler.IsLBProbeRequest(req) h.logRequest(req, cfRay, lbProbe) var resp *http.Response @@ -632,8 +560,7 @@ func (h *TunnelHandler) createRequest(stream *h2mux.MuxedStream) (*http.Request, if err != nil { return nil, errors.Wrap(err, "Unexpected error from http.NewRequest") } - - err = H2RequestHeadersToH1Request(stream.Headers, req) + err = streamhandler.H2RequestHeadersToH1Request(stream.Headers, req) if err != nil { return nil, errors.Wrap(err, "invalid request received") } @@ -773,10 +700,6 @@ func uint8ToString(input uint8) string { return strconv.FormatUint(uint64(input), 10) } -func isLBProbeRequest(req *http.Request) bool { - return strings.HasPrefix(req.UserAgent(), lbProbeUserAgentPrefix) -} - // Print out the given lines in a nice ASCII box. 
func asciiBox(lines []string, padding int) (box []string) { maxLen := maxLen(lines) diff --git a/originservice/originservice.go b/originservice/originservice.go index 34aadb8f..3cd0af53 100644 --- a/originservice/originservice.go +++ b/originservice/originservice.go @@ -8,6 +8,7 @@ import ( "io" "net" "net/http" + "net/url" "strconv" "strings" @@ -22,20 +23,22 @@ import ( // OriginService is an interface to proxy requests to different type of origins type OriginService interface { Proxy(stream *h2mux.MuxedStream, req *http.Request) (resp *http.Response, err error) + URL() *url.URL + Summary() string Shutdown() } // HTTPService talks to origin using HTTP/HTTPS type HTTPService struct { client http.RoundTripper - originAddr string + originURL *url.URL chunkedEncoding bool } -func NewHTTPService(transport http.RoundTripper, originAddr string, chunkedEncoding bool) OriginService { +func NewHTTPService(transport http.RoundTripper, url *url.URL, chunkedEncoding bool) OriginService { return &HTTPService{ client: transport, - originAddr: originAddr, + originURL: url, chunkedEncoding: chunkedEncoding, } } @@ -55,13 +58,13 @@ func (hc *HTTPService) Proxy(stream *h2mux.MuxedStream, req *http.Request) (*htt resp, err := hc.client.RoundTrip(req) if err != nil { - return nil, errors.Wrap(err, "Error proxying request to HTTP origin") + return nil, errors.Wrap(err, "error proxying request to HTTP origin") } defer resp.Body.Close() err = stream.WriteHeaders(h1ResponseToH2Response(resp)) if err != nil { - return nil, errors.Wrap(err, "Error writing response header to HTTP origin") + return nil, errors.Wrap(err, "error writing response header to HTTP origin") } if isEventStream(resp) { writeEventStream(stream, resp.Body) @@ -73,30 +76,43 @@ func (hc *HTTPService) Proxy(stream *h2mux.MuxedStream, req *http.Request) (*htt return resp, nil } +func (hc *HTTPService) URL() *url.URL { + return hc.originURL +} + +func (hc *HTTPService) Summary() string { + return fmt.Sprintf("HTTP service listening on %s", hc.originURL) +} + func (hc *HTTPService) Shutdown() {} // WebsocketService talks to origin using WS/WSS type WebsocketService struct { tlsConfig *tls.Config + originURL *url.URL shutdownC chan struct{} } -func NewWebSocketService(tlsConfig *tls.Config, url string) (OriginService, error) { +func NewWebSocketService(tlsConfig *tls.Config, url *url.URL) (OriginService, error) { listener, err := net.Listen("tcp", "127.0.0.1:") if err != nil { - return nil, errors.Wrap(err, "Cannot start Websocket Proxy Server") + return nil, errors.Wrap(err, "cannot start Websocket Proxy Server") } shutdownC := make(chan struct{}) go func() { - websocket.StartProxyServer(log.CreateLogger(), listener, url, shutdownC) + websocket.StartProxyServer(log.CreateLogger(), listener, url.String(), shutdownC) }() return &WebsocketService{ tlsConfig: tlsConfig, + originURL: url, shutdownC: shutdownC, }, nil } -func (wsc *WebsocketService) Proxy(stream *h2mux.MuxedStream, req *http.Request) (response *http.Response, err error) { +func (wsc *WebsocketService) Proxy(stream *h2mux.MuxedStream, req *http.Request) (*http.Response, error) { + if !websocket.IsWebSocketUpgrade(req) { + return nil, fmt.Errorf("request is not a websocket connection") + } conn, response, err := websocket.ClientConnect(req, wsc.tlsConfig) if err != nil { return nil, err @@ -104,7 +120,7 @@ func (wsc *WebsocketService) Proxy(stream *h2mux.MuxedStream, req *http.Request) defer conn.Close() err = stream.WriteHeaders(h1ResponseToH2Response(response)) if err != nil { - return 
nil, errors.Wrap(err, "Error writing response header to websocket origin") + return nil, errors.Wrap(err, "error writing response header to websocket origin") } // Copy to/from stream to the undelying connection. Use the underlying // connection because cloudflared doesn't operate on the message themselves @@ -112,6 +128,14 @@ func (wsc *WebsocketService) Proxy(stream *h2mux.MuxedStream, req *http.Request) return response, nil } +func (wsc *WebsocketService) URL() *url.URL { + return wsc.originURL +} + +func (wsc *WebsocketService) Summary() string { + return fmt.Sprintf("Websocket listening on %s", wsc.originURL) +} + func (wsc *WebsocketService) Shutdown() { close(wsc.shutdownC) } @@ -120,21 +144,26 @@ func (wsc *WebsocketService) Shutdown() { type HelloWorldService struct { client http.RoundTripper listener net.Listener + originURL *url.URL shutdownC chan struct{} } func NewHelloWorldService(transport http.RoundTripper) (OriginService, error) { listener, err := hello.CreateTLSListener("127.0.0.1:") if err != nil { - return nil, errors.Wrap(err, "Cannot start Hello World Server") + return nil, errors.Wrap(err, "cannot start Hello World Server") } shutdownC := make(chan struct{}) go func() { hello.StartHelloWorldServer(log.CreateLogger(), listener, shutdownC) }() return &HelloWorldService{ - client: transport, - listener: listener, + client: transport, + listener: listener, + originURL: &url.URL{ + Scheme: "https", + Host: listener.Addr().String(), + }, shutdownC: shutdownC, }, nil } @@ -142,16 +171,15 @@ func NewHelloWorldService(transport http.RoundTripper) (OriginService, error) { func (hwc *HelloWorldService) Proxy(stream *h2mux.MuxedStream, req *http.Request) (*http.Response, error) { // Request origin to keep connection alive to improve performance req.Header.Set("Connection", "keep-alive") - resp, err := hwc.client.RoundTrip(req) if err != nil { - return nil, errors.Wrap(err, "Error proxying request to Hello World origin") + return nil, errors.Wrap(err, "error proxying request to Hello World origin") } defer resp.Body.Close() err = stream.WriteHeaders(h1ResponseToH2Response(resp)) if err != nil { - return nil, errors.Wrap(err, "Error writing response header to Hello World origin") + return nil, errors.Wrap(err, "error writing response header to Hello World origin") } // Use CopyBuffer, because Copy only allocates a 32KiB buffer, and cross-stream @@ -161,6 +189,14 @@ func (hwc *HelloWorldService) Proxy(stream *h2mux.MuxedStream, req *http.Request return resp, nil } +func (hwc *HelloWorldService) URL() *url.URL { + return hwc.originURL +} + +func (hwc *HelloWorldService) Summary() string { + return fmt.Sprintf("Hello World service listening on %s", hwc.originURL) +} + func (hwc *HelloWorldService) Shutdown() { hwc.listener.Close() } diff --git a/streamhandler/request.go b/streamhandler/request.go new file mode 100644 index 00000000..40791d06 --- /dev/null +++ b/streamhandler/request.go @@ -0,0 +1,69 @@ +package streamhandler + +import ( + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + + "github.com/cloudflare/cloudflared/h2mux" + "github.com/pkg/errors" +) + +const ( + lbProbeUserAgentPrefix = "Mozilla/5.0 (compatible; Cloudflare-Traffic-Manager/1.0; +https://www.cloudflare.com/traffic-manager/;" +) + +func FindCfRayHeader(h1 *http.Request) string { + return h1.Header.Get("Cf-Ray") +} + +func IsLBProbeRequest(req *http.Request) bool { + return strings.HasPrefix(req.UserAgent(), lbProbeUserAgentPrefix) +} + +func createRequest(stream *h2mux.MuxedStream, url *url.URL) 
(*http.Request, error) { + req, err := http.NewRequest(http.MethodGet, url.String(), h2mux.MuxedStreamReader{MuxedStream: stream}) + if err != nil { + return nil, errors.Wrap(err, "unexpected error from http.NewRequest") + } + err = H2RequestHeadersToH1Request(stream.Headers, req) + if err != nil { + return nil, errors.Wrap(err, "invalid request received") + } + return req, nil +} + +func H2RequestHeadersToH1Request(h2 []h2mux.Header, h1 *http.Request) error { + for _, header := range h2 { + switch header.Name { + case ":method": + h1.Method = header.Value + case ":scheme": + case ":authority": + // Otherwise the host header will be based on the origin URL + h1.Host = header.Value + case ":path": + u, err := url.Parse(header.Value) + if err != nil { + return fmt.Errorf("unparseable path") + } + resolved := h1.URL.ResolveReference(u) + // prevent escaping base URL + if !strings.HasPrefix(resolved.String(), h1.URL.String()) { + return fmt.Errorf("invalid path") + } + h1.URL = resolved + case "content-length": + contentLength, err := strconv.ParseInt(header.Value, 10, 64) + if err != nil { + return fmt.Errorf("unparseable content length") + } + h1.ContentLength = contentLength + default: + h1.Header.Add(http.CanonicalHeaderKey(header.Name), header.Value) + } + } + return nil +} diff --git a/streamhandler/stream_handler.go b/streamhandler/stream_handler.go new file mode 100644 index 00000000..83d65d90 --- /dev/null +++ b/streamhandler/stream_handler.go @@ -0,0 +1,183 @@ +package streamhandler + +import ( + "context" + "fmt" + "net/http" + "strconv" + + "github.com/cloudflare/cloudflared/h2mux" + "github.com/cloudflare/cloudflared/tunnelhostnamemapper" + "github.com/cloudflare/cloudflared/tunnelrpc" + "github.com/cloudflare/cloudflared/tunnelrpc/pogs" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "zombiezen.com/go/capnproto2/rpc" +) + +const ( + statusPseudoHeader = ":status" +) + +type httpErrorStatus struct { + status string + text []byte +} + +var ( + statusBadRequest = newHTTPErrorStatus(http.StatusBadRequest) + statusNotFound = newHTTPErrorStatus(http.StatusNotFound) + statusBadGateway = newHTTPErrorStatus(http.StatusBadGateway) +) + +func newHTTPErrorStatus(status int) *httpErrorStatus { + return &httpErrorStatus{ + status: strconv.Itoa(status), + text: []byte(http.StatusText(status)), + } +} + +// StreamHandler handles new stream opened by the edge. The streams can be used to proxy requests or make RPC. 
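// Streams whose headers are exactly RPCHeaders() are served as a capnp RPC control stream (UseConfiguration);
// every other stream is proxied to the origin service registered for its cf-cloudflared-proxy-tunnel-hostname header.
//
// A rough usage sketch (conn, ctx and the MuxerConfig values are illustrative only, not part of this change):
//
//	newConfigChan := make(chan *pogs.ClientConfig)
//	useConfigResultChan := make(chan *pogs.UseConfigurationResult)
//	handler := NewStreamHandler(newConfigChan, useConfigResultChan, logrus.New())
//	muxer, err := h2mux.Handshake(conn, conn, h2mux.MuxerConfig{
//		Timeout:  time.Second,
//		Handler:  handler,
//		IsClient: true,
//		Name:     "origin",
//		Logger:   logrus.NewEntry(logrus.New()),
//	})
//	if err == nil {
//		go muxer.Serve(ctx) // streams opened by the edge are dispatched to handler.ServeStream
//	}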
+type StreamHandler struct { + // newConfigChan is a send-only channel to notify Supervisor of a new ClientConfig + newConfigChan chan<- *pogs.ClientConfig + // useConfigResultChan is a receive-only channel for Supervisor to communicate the result of applying a new ClientConfig + useConfigResultChan <-chan *pogs.UseConfigurationResult + // originMapper maps tunnel hostname to origin service + tunnelHostnameMapper *tunnelhostnamemapper.TunnelHostnameMapper + logger *logrus.Entry +} + +// NewStreamHandler creates a new StreamHandler +func NewStreamHandler(newConfigChan chan<- *pogs.ClientConfig, + useConfigResultChan <-chan *pogs.UseConfigurationResult, + logger *logrus.Logger, +) *StreamHandler { + return &StreamHandler{ + newConfigChan: newConfigChan, + useConfigResultChan: useConfigResultChan, + tunnelHostnameMapper: tunnelhostnamemapper.NewTunnelHostnameMapper(), + logger: logger.WithField("subsystem", "streamHandler"), + } +} + +// UseConfiguration implements ClientService +func (s *StreamHandler) UseConfiguration(ctx context.Context, config *pogs.ClientConfig) (*pogs.UseConfigurationResult, error) { + select { + case <-ctx.Done(): + err := fmt.Errorf("Timeout while sending new config to Supervisor") + s.logger.Error(err) + return nil, err + case s.newConfigChan <- config: + } + select { + case <-ctx.Done(): + err := fmt.Errorf("Timeout applying new configuration") + s.logger.Error(err) + return nil, err + case result := <-s.useConfigResultChan: + return result, nil + } +} + +// UpdateConfig replaces current originmapper mapping with mappings from newConfig +func (s *StreamHandler) UpdateConfig(newConfig []*pogs.ReverseProxyConfig) (failedConfigs []*pogs.FailedConfig) { + // TODO: TUN-1968: Gracefully apply new config + s.tunnelHostnameMapper.DeleteAll() + for _, tunnelConfig := range newConfig { + tunnelHostname := tunnelConfig.TunnelHostname + originSerice, err := tunnelConfig.Origin.Service() + if err != nil { + s.logger.WithField("tunnelHostname", tunnelHostname).WithError(err).Error("Invalid origin service config") + failedConfigs = append(failedConfigs, &pogs.FailedConfig{ + Config: tunnelConfig, + Reason: tunnelConfig.FailReason(err), + }) + continue + } + s.tunnelHostnameMapper.Add(tunnelConfig.TunnelHostname, originSerice) + s.logger.WithField("tunnelHostname", tunnelHostname).Infof("New origin service config: %v", originSerice.Summary()) + } + return +} + +// ServeStream implements MuxedStreamHandler interface +func (s *StreamHandler) ServeStream(stream *h2mux.MuxedStream) error { + if stream.IsRPCStream() { + return s.serveRPC(stream) + } + if err := s.serveRequest(stream); err != nil { + s.logger.Error(err) + return err + } + return nil +} + +func (s *StreamHandler) serveRPC(stream *h2mux.MuxedStream) error { + stream.WriteHeaders([]h2mux.Header{{Name: ":status", Value: "200"}}) + main := pogs.ClientService_ServerToClient(s) + rpcLogger := s.logger.WithField("subsystem", "clientserver-rpc") + rpcConn := rpc.NewConn( + tunnelrpc.NewTransportLogger(rpcLogger, rpc.StreamTransport(stream)), + rpc.MainInterface(main.Client), + tunnelrpc.ConnLog(s.logger.WithField("subsystem", "clientserver-rpc-transport")), + ) + return rpcConn.Wait() +} + +func (s *StreamHandler) serveRequest(stream *h2mux.MuxedStream) error { + tunnelHostname := stream.TunnelHostname() + if !tunnelHostname.IsSet() { + s.writeErrorStatus(stream, statusBadRequest) + return fmt.Errorf("stream doesn't have tunnelHostname") + } + + originService, ok := s.tunnelHostnameMapper.Get(tunnelHostname) + if !ok { + 
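// No origin service is currently mapped to this tunnel hostname (for example, it was removed by a newer ClientConfig), so reply 404 to tell the eyeball which category of failure occurred.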
s.writeErrorStatus(stream, statusNotFound) + return fmt.Errorf("cannot map tunnel hostname %s to origin", tunnelHostname) + } + + req, err := createRequest(stream, originService.URL()) + if err != nil { + s.writeErrorStatus(stream, statusBadRequest) + return errors.Wrap(err, "cannot create request") + } + + logger := s.requestLogger(req, tunnelHostname) + logger.Debugf("Request Headers %+v", req.Header) + + resp, err := originService.Proxy(stream, req) + if err != nil { + s.writeErrorStatus(stream, statusBadGateway) + return errors.Wrap(err, "cannot proxy request") + } + + logger.WithField("status", resp.Status).Debugf("Response Headers %+v", resp.Header) + return nil +} + +func (s *StreamHandler) requestLogger(req *http.Request, tunnelHostname h2mux.TunnelHostname) *logrus.Entry { + cfRay := FindCfRayHeader(req) + lbProbe := IsLBProbeRequest(req) + logger := s.logger.WithField("tunnelHostname", tunnelHostname) + if cfRay != "" { + logger = logger.WithField("CF-RAY", cfRay) + logger.Debugf("%s %s %s", req.Method, req.URL, req.Proto) + } else if lbProbe { + logger.Debugf("Load Balancer health check %s %s %s", req.Method, req.URL, req.Proto) + } else { + logger.Warnf("Requests %v does not have CF-RAY header. Please open a support ticket with Cloudflare.", req) + } + return logger +} + +func (s *StreamHandler) writeErrorStatus(stream *h2mux.MuxedStream, status *httpErrorStatus) { + stream.WriteHeaders([]h2mux.Header{ + { + Name: statusPseudoHeader, + Value: status.status, + }, + }) + stream.Write(status.text) +} diff --git a/streamhandler/stream_handler_test.go b/streamhandler/stream_handler_test.go new file mode 100644 index 00000000..c2d95b69 --- /dev/null +++ b/streamhandler/stream_handler_test.go @@ -0,0 +1,223 @@ +package streamhandler + +import ( + "context" + "io" + "net" + "net/http" + "net/http/httptest" + "strconv" + "sync" + "testing" + "time" + + "github.com/cloudflare/cloudflared/h2mux" + "github.com/cloudflare/cloudflared/tunnelrpc/pogs" + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "github.com/stretchr/testify/assert" + "golang.org/x/sync/errgroup" +) + +const ( + testOpenStreamTimeout = time.Millisecond * 5000 + testHandshakeTimeout = time.Millisecond * 1000 +) + +var ( + testTunnelHostname = h2mux.TunnelHostname("123.cftunnel.com") + baseHeaders = []h2mux.Header{ + {Name: ":method", Value: "GET"}, + {Name: ":scheme", Value: "http"}, + {Name: ":authority", Value: "example.com"}, + {Name: ":path", Value: "/"}, + } + tunnelHostnameHeader = h2mux.Header{ + Name: h2mux.CloudflaredProxyTunnelHostnameHeader, + Value: testTunnelHostname.String(), + } +) + +func TestServeRequest(t *testing.T) { + configChan := make(chan *pogs.ClientConfig) + useConfigResultChan := make(chan *pogs.UseConfigurationResult) + streamHandler := NewStreamHandler(configChan, useConfigResultChan, logrus.New()) + + message := []byte("Hello cloudflared") + httpServer := httptest.NewServer(&mockHTTPHandler{message}) + + reverseProxyConfigs := []*pogs.ReverseProxyConfig{ + { + TunnelHostname: testTunnelHostname, + Origin: &pogs.HTTPOriginConfig{ + URLString: httpServer.URL, + }, + }, + } + streamHandler.UpdateConfig(reverseProxyConfigs) + + muxPair := NewDefaultMuxerPair(t, streamHandler) + muxPair.Serve(t) + + ctx, cancel := context.WithTimeout(context.Background(), testOpenStreamTimeout) + defer cancel() + + headers := append(baseHeaders, tunnelHostnameHeader) + stream, err := muxPair.EdgeMux.OpenStream(ctx, headers, nil) + assert.NoError(t, err) + assertStatusHeader(t, http.StatusOK, 
stream.Headers) + assertRespBody(t, message, stream) +} + +func TestServeBadRequest(t *testing.T) { + configChan := make(chan *pogs.ClientConfig) + useConfigResultChan := make(chan *pogs.UseConfigurationResult) + streamHandler := NewStreamHandler(configChan, useConfigResultChan, logrus.New()) + + muxPair := NewDefaultMuxerPair(t, streamHandler) + muxPair.Serve(t) + + ctx, cancel := context.WithTimeout(context.Background(), testOpenStreamTimeout) + defer cancel() + + // No tunnel hostname header, expect to get 400 Bad Request + stream, err := muxPair.EdgeMux.OpenStream(ctx, baseHeaders, nil) + assert.NoError(t, err) + assertStatusHeader(t, http.StatusBadRequest, stream.Headers) + assertRespBody(t, statusBadRequest.text, stream) + + // No mapping for the tunnel hostname, expect to get 404 Not Found + headers := append(baseHeaders, tunnelHostnameHeader) + stream, err = muxPair.EdgeMux.OpenStream(ctx, headers, nil) + assert.NoError(t, err) + assertStatusHeader(t, http.StatusNotFound, stream.Headers) + assertRespBody(t, statusNotFound.text, stream) + + // Nothing listening on empty url, so proxy would fail. Expect to get 502 Bad Gateway + reverseProxyConfigs := []*pogs.ReverseProxyConfig{ + { + TunnelHostname: testTunnelHostname, + Origin: &pogs.HTTPOriginConfig{ + URLString: "", + }, + }, + } + streamHandler.UpdateConfig(reverseProxyConfigs) + stream, err = muxPair.EdgeMux.OpenStream(ctx, headers, nil) + assert.NoError(t, err) + assertStatusHeader(t, http.StatusBadGateway, stream.Headers) + assertRespBody(t, statusBadGateway.text, stream) + + // Invalid content-length, wouldn't not be able to create a request. Expect to get 400 Bad Request + headers = append(headers, h2mux.Header{ + Name: "content-length", + Value: "x", + }) + stream, err = muxPair.EdgeMux.OpenStream(ctx, headers, nil) + assert.NoError(t, err) + assertStatusHeader(t, http.StatusBadRequest, stream.Headers) + assertRespBody(t, statusBadRequest.text, stream) +} + +func assertStatusHeader(t *testing.T, expectedStatus int, headers []h2mux.Header) { + assert.Equal(t, statusPseudoHeader, headers[0].Name) + assert.Equal(t, strconv.Itoa(expectedStatus), headers[0].Value) +} + +func assertRespBody(t *testing.T, expectedRespBody []byte, stream *h2mux.MuxedStream) { + respBody := make([]byte, len(expectedRespBody)) + _, err := stream.Read(respBody) + assert.NoError(t, err) + assert.Equal(t, expectedRespBody, respBody) +} + +type DefaultMuxerPair struct { + OriginMuxConfig h2mux.MuxerConfig + OriginMux *h2mux.Muxer + OriginConn net.Conn + EdgeMuxConfig h2mux.MuxerConfig + EdgeMux *h2mux.Muxer + EdgeConn net.Conn + doneC chan struct{} +} + +func NewDefaultMuxerPair(t assert.TestingT, h h2mux.MuxedStreamHandler) *DefaultMuxerPair { + origin, edge := net.Pipe() + p := &DefaultMuxerPair{ + OriginMuxConfig: h2mux.MuxerConfig{ + Timeout: testHandshakeTimeout, + Handler: h, + IsClient: true, + Name: "origin", + Logger: logrus.NewEntry(logrus.New()), + DefaultWindowSize: (1 << 8) - 1, + MaxWindowSize: (1 << 15) - 1, + StreamWriteBufferMaxLen: 1024, + }, + OriginConn: origin, + EdgeMuxConfig: h2mux.MuxerConfig{ + Timeout: testHandshakeTimeout, + IsClient: false, + Name: "edge", + Logger: logrus.NewEntry(logrus.New()), + DefaultWindowSize: (1 << 8) - 1, + MaxWindowSize: (1 << 15) - 1, + StreamWriteBufferMaxLen: 1024, + }, + EdgeConn: edge, + doneC: make(chan struct{}), + } + assert.NoError(t, p.Handshake()) + return p +} + +func (p *DefaultMuxerPair) Handshake() error { + ctx, cancel := context.WithTimeout(context.Background(), testHandshakeTimeout) 
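// Handshake both ends concurrently: net.Pipe is unbuffered, so the edge and origin muxers must exchange settings in lockstep or the handshake would deadlock.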
+ defer cancel() + errGroup, _ := errgroup.WithContext(ctx) + errGroup.Go(func() (err error) { + p.EdgeMux, err = h2mux.Handshake(p.EdgeConn, p.EdgeConn, p.EdgeMuxConfig) + return errors.Wrap(err, "edge handshake failure") + }) + errGroup.Go(func() (err error) { + p.OriginMux, err = h2mux.Handshake(p.OriginConn, p.OriginConn, p.OriginMuxConfig) + return errors.Wrap(err, "origin handshake failure") + }) + + return errGroup.Wait() +} + +func (p *DefaultMuxerPair) Serve(t assert.TestingT) { + ctx := context.Background() + var wg sync.WaitGroup + wg.Add(2) + go func() { + err := p.EdgeMux.Serve(ctx) + if err != nil && err != io.EOF && err != io.ErrClosedPipe { + t.Errorf("error in edge muxer Serve(): %s", err) + } + p.OriginMux.Shutdown() + wg.Done() + }() + go func() { + err := p.OriginMux.Serve(ctx) + if err != nil && err != io.EOF && err != io.ErrClosedPipe { + t.Errorf("error in origin muxer Serve(): %s", err) + } + p.EdgeMux.Shutdown() + wg.Done() + }() + go func() { + // notify when both muxes have stopped serving + wg.Wait() + close(p.doneC) + }() +} + +type mockHTTPHandler struct { + message []byte +} + +func (mth *mockHTTPHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.Write(mth.message) +} diff --git a/supervisor/supervisor.go b/supervisor/supervisor.go new file mode 100644 index 00000000..a4fcee22 --- /dev/null +++ b/supervisor/supervisor.go @@ -0,0 +1,179 @@ +package supervisor + +import ( + "context" + "crypto/tls" + "fmt" + "os" + "os/signal" + "sync" + "syscall" + + "golang.org/x/sync/errgroup" + + "github.com/cloudflare/cloudflared/cmd/cloudflared/updater" + "github.com/cloudflare/cloudflared/connection" + "github.com/cloudflare/cloudflared/h2mux" + "github.com/cloudflare/cloudflared/streamhandler" + "github.com/cloudflare/cloudflared/tunnelrpc/pogs" + "github.com/sirupsen/logrus" +) + +type Supervisor struct { + connManager *connection.EdgeManager + streamHandler *streamhandler.StreamHandler + autoupdater *updater.AutoUpdater + supportAutoupdate bool + newConfigChan <-chan *pogs.ClientConfig + useConfigResultChan chan<- *pogs.UseConfigurationResult + state *state + logger *logrus.Entry +} + +func NewSupervisor( + defaultClientConfig *pogs.ClientConfig, + userCredential []byte, + tlsConfig *tls.Config, + serviceDiscoverer connection.EdgeServiceDiscoverer, + cloudflaredConfig *connection.CloudflaredConfig, + autoupdater *updater.AutoUpdater, + supportAutoupdate bool, + logger *logrus.Logger, +) (*Supervisor, error) { + newConfigChan := make(chan *pogs.ClientConfig) + useConfigResultChan := make(chan *pogs.UseConfigurationResult) + streamHandler := streamhandler.NewStreamHandler(newConfigChan, useConfigResultChan, logger) + invalidConfigs := streamHandler.UpdateConfig(defaultClientConfig.ReverseProxyConfigs) + + if len(invalidConfigs) > 0 { + for _, invalidConfig := range invalidConfigs { + logger.Errorf("Tunnel %+v is invalid, reason: %s", invalidConfig.Config, invalidConfig.Reason) + } + return nil, fmt.Errorf("At least 1 Tunnel config is invalid") + } + + tunnelHostnames := make([]h2mux.TunnelHostname, len(defaultClientConfig.ReverseProxyConfigs)) + for i, reverseProxyConfig := range defaultClientConfig.ReverseProxyConfigs { + tunnelHostnames[i] = reverseProxyConfig.TunnelHostname + } + defaultEdgeMgrConfigurable := &connection.EdgeManagerConfigurable{ + tunnelHostnames, + defaultClientConfig.EdgeConnectionConfig, + } + return &Supervisor{ + connManager: connection.NewEdgeManager(streamHandler, defaultEdgeMgrConfigurable, userCredential, tlsConfig, + 
serviceDiscoverer, cloudflaredConfig, logger), + streamHandler: streamHandler, + autoupdater: autoupdater, + supportAutoupdate: supportAutoupdate, + newConfigChan: newConfigChan, + useConfigResultChan: useConfigResultChan, + state: newState(defaultClientConfig), + logger: logger.WithField("subsystem", "supervisor"), + }, nil +} + +func (s *Supervisor) Run(ctx context.Context) error { + errGroup, groupCtx := errgroup.WithContext(ctx) + + errGroup.Go(func() error { + return s.connManager.Run(groupCtx) + }) + + errGroup.Go(func() error { + return s.listenToNewConfig(groupCtx) + }) + + errGroup.Go(func() error { + return s.listenToShutdownSignal(groupCtx) + }) + + if s.supportAutoupdate { + errGroup.Go(func() error { + return s.autoupdater.Run(groupCtx) + }) + } + + err := errGroup.Wait() + s.logger.Warnf("Supervisor terminated, reason: %v", err) + return err +} + +func (s *Supervisor) listenToShutdownSignal(serveCtx context.Context) error { + signals := make(chan os.Signal, 10) + signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT) + defer signal.Stop(signals) + + select { + case <-serveCtx.Done(): + return serveCtx.Err() + case sig := <-signals: + return fmt.Errorf("received %v signal", sig) + } +} + +func (s *Supervisor) listenToNewConfig(ctx context.Context) error { + for { + select { + case <-ctx.Done(): + return ctx.Err() + case newConfig := <-s.newConfigChan: + s.useConfigResultChan <- s.notifySubsystemsNewConfig(newConfig) + } + } +} + +func (s *Supervisor) notifySubsystemsNewConfig(newConfig *pogs.ClientConfig) *pogs.UseConfigurationResult { + s.logger.Infof("Received configuration %v", newConfig.Version) + if s.state.hasAppliedVersion(newConfig.Version) { + s.logger.Infof("%v has been applied", newConfig.Version) + return &pogs.UseConfigurationResult{ + Success: true, + } + } + + s.state.updateConfig(newConfig) + var tunnelHostnames []h2mux.TunnelHostname + for _, tunnelConfig := range newConfig.ReverseProxyConfigs { + tunnelHostnames = append(tunnelHostnames, tunnelConfig.TunnelHostname) + } + // Update connManager configurable + s.connManager.UpdateConfigurable(&connection.EdgeManagerConfigurable{ + tunnelHostnames, + newConfig.EdgeConnectionConfig, + }) + // Update streamHandler tunnelHostnameMapper mapping + failedConfigs := s.streamHandler.UpdateConfig(newConfig.ReverseProxyConfigs) + + if s.supportAutoupdate { + s.autoupdater.Update(newConfig.SupervisorConfig.AutoUpdateFrequency) + } + + return &pogs.UseConfigurationResult{ + Success: len(failedConfigs) == 0, + FailedConfigs: failedConfigs, + } +} + +type state struct { + sync.RWMutex + currentConfig *pogs.ClientConfig +} + +func newState(currentConfig *pogs.ClientConfig) *state { + return &state{ + currentConfig: currentConfig, + } +} + +func (s *state) hasAppliedVersion(incomingVersion pogs.Version) bool { + s.RLock() + defer s.RUnlock() + return s.currentConfig.Version.IsNewerOrEqual(incomingVersion) +} + +func (s *state) updateConfig(newConfig *pogs.ClientConfig) { + s.Lock() + defer s.Unlock() + s.currentConfig = newConfig +} diff --git a/tlsconfig/certreloader.go b/tlsconfig/certreloader.go index 00ebab39..5ce83934 100644 --- a/tlsconfig/certreloader.go +++ b/tlsconfig/certreloader.go @@ -89,16 +89,35 @@ func LoadOriginCA(c *cli.Context, logger *logrus.Logger) (*x509.CertPool, error) return originCertPool, nil } -func LoadCustomCertPool(customCertFilename string) (*x509.CertPool, error) { - pool := x509.NewCertPool() - customCAPoolPEM, err := ioutil.ReadFile(customCertFilename) +func LoadCustomOriginCA(originCAFilename 
string) (*x509.CertPool, error) { + // First, obtain the system certificate pool + certPool, err := x509.SystemCertPool() if err != nil { - return nil, errors.Wrap(err, fmt.Sprintf("unable to read the file %s", customCertFilename)) + certPool = x509.NewCertPool() } - if !pool.AppendCertsFromPEM(customCAPoolPEM) { + + // Next, append the Cloudflare CAs into the system pool + cfRootCA, err := GetCloudflareRootCA() + if err != nil { + return nil, errors.Wrap(err, "could not append Cloudflare Root CAs to cloudflared certificate pool") + } + for _, cert := range cfRootCA { + certPool.AddCert(cert) + } + + if originCAFilename == "" { + return certPool, nil + } + + customOriginCA, err := ioutil.ReadFile(originCAFilename) + if err != nil { + return nil, errors.Wrap(err, fmt.Sprintf("unable to read the file %s", originCAFilename)) + } + + if !certPool.AppendCertsFromPEM(customOriginCA) { return nil, fmt.Errorf("error appending custom CA to cert pool") } - return pool, nil + return certPool, nil } func CreateTunnelConfig(c *cli.Context) (*tls.Config, error) { diff --git a/tunneldns/https_upstream.go b/tunneldns/https_upstream.go index 0544112a..ac9b60ec 100644 --- a/tunneldns/https_upstream.go +++ b/tunneldns/https_upstream.go @@ -82,7 +82,7 @@ func (u *UpstreamHTTPS) exchangeWireformat(msg []byte) ([]byte, error) { return nil, errors.Wrap(err, "failed to create an HTTPS request") } - req.Header.Add("Content-Type", "application/dns-udpwireformat") + req.Header.Add("Content-Type", "application/dns-message") req.Host = u.endpoint.Hostname() resp, err := u.client.Do(req) diff --git a/tunnelhostnamemapper/tunnelhostnamemapper.go b/tunnelhostnamemapper/tunnelhostnamemapper.go new file mode 100644 index 00000000..bb8f70f1 --- /dev/null +++ b/tunnelhostnamemapper/tunnelhostnamemapper.go @@ -0,0 +1,49 @@ +package tunnelhostnamemapper + +import ( + "sync" + + "github.com/cloudflare/cloudflared/h2mux" + "github.com/cloudflare/cloudflared/originservice" +) + +// TunnelHostnameMapper maps TunnelHostname to an OriginService +type TunnelHostnameMapper struct { + sync.RWMutex + tunnelHostnameToOrigin map[h2mux.TunnelHostname]originservice.OriginService +} + +func NewTunnelHostnameMapper() *TunnelHostnameMapper { + return &TunnelHostnameMapper{ + tunnelHostnameToOrigin: make(map[h2mux.TunnelHostname]originservice.OriginService), + } +} + +// Get an OriginService given a TunnelHostname +func (om *TunnelHostnameMapper) Get(key h2mux.TunnelHostname) (originservice.OriginService, bool) { + om.RLock() + defer om.RUnlock() + originService, ok := om.tunnelHostnameToOrigin[key] + return originService, ok +} + +// Add a mapping. 
If there is already an OriginService with this key, shutdown the old origin service and replace it +// with the new one +func (om *TunnelHostnameMapper) Add(key h2mux.TunnelHostname, os originservice.OriginService) { + om.Lock() + defer om.Unlock() + if oldOS, ok := om.tunnelHostnameToOrigin[key]; ok { + oldOS.Shutdown() + } + om.tunnelHostnameToOrigin[key] = os +} + +// DeleteAll mappings, and shutdown all OriginService +func (om *TunnelHostnameMapper) DeleteAll() { + om.Lock() + defer om.Unlock() + for key, os := range om.tunnelHostnameToOrigin { + os.Shutdown() + delete(om.tunnelHostnameToOrigin, key) + } +} diff --git a/tunnelhostnamemapper/tunnelhostnamemapper_test.go b/tunnelhostnamemapper/tunnelhostnamemapper_test.go new file mode 100644 index 00000000..e38d0611 --- /dev/null +++ b/tunnelhostnamemapper/tunnelhostnamemapper_test.go @@ -0,0 +1,74 @@ +package tunnelhostnamemapper + +import ( + "fmt" + "net/http" + "net/url" + "sync" + "testing" + + "github.com/cloudflare/cloudflared/h2mux" + "github.com/cloudflare/cloudflared/originservice" + "github.com/stretchr/testify/assert" +) + +const ( + routines = 1000 +) + +func TestTunnelHostnameMapperConcurrentAccess(t *testing.T) { + thm := NewTunnelHostnameMapper() + + concurrentOps(t, func(i int) { + // om is empty + os, ok := thm.Get(tunnelHostname(i)) + assert.False(t, ok) + assert.Nil(t, os) + }) + + firstURL, err := url.Parse("https://127.0.0.1:8080") + assert.NoError(t, err) + httpOS := originservice.NewHTTPService(http.DefaultTransport, firstURL, false) + concurrentOps(t, func(i int) { + thm.Add(tunnelHostname(i), httpOS) + }) + + concurrentOps(t, func(i int) { + os, ok := thm.Get(tunnelHostname(i)) + assert.True(t, ok) + assert.Equal(t, httpOS, os) + }) + + secondURL, err := url.Parse("https://127.0.0.1:8080") + assert.NoError(t, err) + secondHTTPOS := originservice.NewHTTPService(http.DefaultTransport, secondURL, true) + concurrentOps(t, func(i int) { + // Add should httpOS with secondHTTPOS + thm.Add(tunnelHostname(i), secondHTTPOS) + }) + + concurrentOps(t, func(i int) { + os, ok := thm.Get(tunnelHostname(i)) + assert.True(t, ok) + assert.Equal(t, secondHTTPOS, os) + }) + + thm.DeleteAll() + assert.Empty(t, thm.tunnelHostnameToOrigin) +} + +func concurrentOps(t *testing.T, f func(i int)) { + var wg sync.WaitGroup + wg.Add(routines) + for i := 0; i < routines; i++ { + go func(i int) { + f(i) + wg.Done() + }(i) + } + wg.Wait() +} + +func tunnelHostname(i int) h2mux.TunnelHostname { + return h2mux.TunnelHostname(fmt.Sprintf("%d.cftunnel.com", i)) +} diff --git a/tunnelrpc/pogs/config.go b/tunnelrpc/pogs/config.go index 37581f9a..67926a0c 100644 --- a/tunnelrpc/pogs/config.go +++ b/tunnelrpc/pogs/config.go @@ -10,6 +10,7 @@ import ( "net/url" "time" + "github.com/cloudflare/cloudflared/h2mux" "github.com/cloudflare/cloudflared/originservice" "github.com/cloudflare/cloudflared/tlsconfig" "github.com/cloudflare/cloudflared/tunnelrpc" @@ -17,37 +18,83 @@ import ( capnp "zombiezen.com/go/capnproto2" "zombiezen.com/go/capnproto2/pogs" "zombiezen.com/go/capnproto2/rpc" + "zombiezen.com/go/capnproto2/server" ) /// /// Structs /// +// ClientConfig is a collection of FallibleConfig that determines how cloudflared should function type ClientConfig struct { - Version uint64 + Version Version + SupervisorConfig *SupervisorConfig + EdgeConnectionConfig *EdgeConnectionConfig + DoHProxyConfigs []*DoHProxyConfig + ReverseProxyConfigs []*ReverseProxyConfig +} + +// Version type models the version of a ClientConfig +type Version uint64 + +func 
InitVersion() Version { + return Version(0) +} + +func (v Version) IsNewerOrEqual(comparedVersion Version) bool { + return v >= comparedVersion +} + +func (v Version) String() string { + return fmt.Sprintf("Version: %d", v) +} + +// FallibleConfig is an interface implemented by configs that cloudflared might not be able to apply +type FallibleConfig interface { + FailReason(err error) string +} + +// SupervisorConfig specifies config of components managed by Supervisor other than ConnectionManager +type SupervisorConfig struct { AutoUpdateFrequency time.Duration MetricsUpdateFrequency time.Duration - HeartbeatInterval time.Duration - MaxFailedHeartbeats uint64 GracePeriod time.Duration - DoHProxyConfigs []*DoHProxyConfig - ReverseProxyConfigs []*ReverseProxyConfig - NumHAConnections uint8 } -type UseConfigurationResult struct { - Success bool - ErrorMessage string +// FailReason implements the FallibleConfig interface for SupervisorConfig +func (sc *SupervisorConfig) FailReason(err error) string { + return fmt.Sprintf("Cannot apply SupervisorConfig, err: %v", err) } +// EdgeConnectionConfig specifies the parameters and how many connections the ConnectionManager should establish with the edge +type EdgeConnectionConfig struct { + NumHAConnections uint8 + HeartbeatInterval time.Duration + Timeout time.Duration + MaxFailedHeartbeats uint64 + UserCredentialPath string +} + +// FailReason implements the FallibleConfig interface for EdgeConnectionConfig +func (cmc *EdgeConnectionConfig) FailReason(err error) string { + return fmt.Sprintf("Cannot apply EdgeConnectionConfig, err: %v", err) +} + +// DoHProxyConfig is configuration for DNS over HTTPS service type DoHProxyConfig struct { ListenHost string ListenPort uint16 Upstreams []string } +// FailReason implements the FallibleConfig interface for DoHProxyConfig +func (dpc *DoHProxyConfig) FailReason(err error) string { + return fmt.Sprintf("Cannot apply DoHProxyConfig, err: %v", err) +} + +// ReverseProxyConfig specifies how and for which hostnames this cloudflared can proxy type ReverseProxyConfig struct { - TunnelHostname string + TunnelHostname h2mux.TunnelHostname Origin OriginConfig Retries uint64 ConnectionTimeout time.Duration @@ -65,7 +112,7 @@ func NewReverseProxyConfig( return nil, fmt.Errorf("NewReverseProxyConfig: originConfig was null") } return &ReverseProxyConfig{ - TunnelHostname: tunnelHostname, + TunnelHostname: h2mux.TunnelHostname(tunnelHostname), Origin: originConfig, Retries: retries, ConnectionTimeout: connectionTimeout, @@ -73,6 +120,11 @@ func NewReverseProxyConfig( }, nil } +// FailReason implements the FallibleConfig interface for ReverseProxyConfig +func (rpc *ReverseProxyConfig) FailReason(err error) string { + return fmt.Sprintf("Cannot apply ReverseProxyConfig, err: %v", err) +} + //go-sumtype:decl OriginConfig type OriginConfig interface { // Service returns a OriginService used to proxy to the origin @@ -82,58 +134,28 @@ type OriginConfig interface { } type HTTPOriginConfig struct { - URL OriginAddr `capnp:"url"` - TCPKeepAlive time.Duration `capnp:"tcpKeepAlive"` - DialDualStack bool - TLSHandshakeTimeout time.Duration `capnp:"tlsHandshakeTimeout"` - TLSVerify bool `capnp:"tlsVerify"` - OriginCAPool string - OriginServerName string - MaxIdleConnections uint64 - IdleConnectionTimeout time.Duration - ProxyConnectTimeout time.Duration - ExpectContinueTimeout time.Duration - ChunkedEncoding bool -} - -type OriginAddr interface { - Addr() string -} - -type HTTPURL struct { - URL *url.URL -} - -func (ha *HTTPURL) Addr() string { - return ha.URL.String() - }
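// Note: the OriginAddr/HTTPURL/UnixPath indirection deleted here is replaced by the plain URLString field on HTTPOriginConfig; Service() now parses that string (for example "https://127.0.0.1:8080") and treats a "unix" scheme as a Unix socket origin.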
- -func (ha *HTTPURL) capnpHTTPURL() *CapnpHTTPURL { - return &CapnpHTTPURL{ - URL: ha.URL.String(), - } -} - -// URL for a HTTP origin, capnp doesn't have native support for URL, so represent it as string -type CapnpHTTPURL struct { - URL string `capnp:"url"` -} - -type UnixPath struct { - Path string -} - -func (up *UnixPath) Addr() string { - return up.Path + URLString string `capnp:"urlString"` + TCPKeepAlive time.Duration `capnp:"tcpKeepAlive"` + DialDualStack bool + TLSHandshakeTimeout time.Duration `capnp:"tlsHandshakeTimeout"` + TLSVerify bool `capnp:"tlsVerify"` + OriginCAPool string + OriginServerName string + MaxIdleConnections uint64 + IdleConnectionTimeout time.Duration + ProxyConnectionTimeout time.Duration + ExpectContinueTimeout time.Duration + ChunkedEncoding bool } func (hc *HTTPOriginConfig) Service() (originservice.OriginService, error) { - rootCAs, err := tlsconfig.LoadCustomCertPool(hc.OriginCAPool) + rootCAs, err := tlsconfig.LoadCustomOriginCA(hc.OriginCAPool) if err != nil { return nil, err } + dialContext := (&net.Dialer{ - Timeout: hc.ProxyConnectTimeout, + Timeout: hc.ProxyConnectionTimeout, KeepAlive: hc.TCPKeepAlive, DualStack: hc.DialDualStack, }).DialContext @@ -150,25 +172,29 @@ func (hc *HTTPOriginConfig) Service() (originservice.OriginService, error) { IdleConnTimeout: hc.IdleConnectionTimeout, ExpectContinueTimeout: hc.ExpectContinueTimeout, } - if unixPath, ok := hc.URL.(*UnixPath); ok { + url, err := url.Parse(hc.URLString) + if err != nil { + return nil, errors.Wrapf(err, "%s is not a valid URL", hc.URLString) + } + if url.Scheme == "unix" { transport.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) { - return dialContext(ctx, "unix", unixPath.Addr()) + return dialContext(ctx, "unix", url.Host) } } - return originservice.NewHTTPService(transport, hc.URL.Addr(), hc.ChunkedEncoding), nil + return originservice.NewHTTPService(transport, url, hc.ChunkedEncoding), nil } func (_ *HTTPOriginConfig) isOriginConfig() {} type WebSocketOriginConfig struct { - URL string `capnp:"url"` + URLString string `capnp:"urlString"` TLSVerify bool `capnp:"tlsVerify"` OriginCAPool string OriginServerName string } func (wsc *WebSocketOriginConfig) Service() (originservice.OriginService, error) { - rootCAs, err := tlsconfig.LoadCustomCertPool(wsc.OriginCAPool) + rootCAs, err := tlsconfig.LoadCustomOriginCA(wsc.OriginCAPool) if err != nil { return nil, err } @@ -177,7 +203,12 @@ func (wsc *WebSocketOriginConfig) Service() (originservice.OriginService, error) ServerName: wsc.OriginServerName, InsecureSkipVerify: wsc.TLSVerify, } - return originservice.NewWebSocketService(tlsConfig, wsc.URL) + + url, err := url.Parse(wsc.URLString) + if err != nil { + return nil, errors.Wrapf(err, "%s is not a valid URL", wsc.URLString) + } + return originservice.NewWebSocketService(tlsConfig, url) } func (_ *WebSocketOriginConfig) isOriginConfig() {} @@ -221,18 +252,45 @@ func (_ *HelloWorldOriginConfig) isOriginConfig() {} */ func MarshalClientConfig(s tunnelrpc.ClientConfig, p *ClientConfig) error { - s.SetVersion(p.Version) - s.SetAutoUpdateFrequency(p.AutoUpdateFrequency.Nanoseconds()) - s.SetMetricsUpdateFrequency(p.MetricsUpdateFrequency.Nanoseconds()) - s.SetHeartbeatInterval(p.HeartbeatInterval.Nanoseconds()) - s.SetMaxFailedHeartbeats(p.MaxFailedHeartbeats) - s.SetGracePeriod(p.GracePeriod.Nanoseconds()) - s.SetNumHAConnections(p.NumHAConnections) - err := marshalDoHProxyConfigs(s, p.DoHProxyConfigs) + s.SetVersion(uint64(p.Version)) + + supervisorConfig, err := 
s.NewSupervisorConfig() if err != nil { - return err + return errors.Wrap(err, "failed to get SupervisorConfig") } - return marshalReverseProxyConfigs(s, p.ReverseProxyConfigs) + if err = MarshalSupervisorConfig(supervisorConfig, p.SupervisorConfig); err != nil { + return errors.Wrap(err, "MarshalSupervisorConfig error") + } + + edgeConnectionConfig, err := s.NewEdgeConnectionConfig() + if err != nil { + return errors.Wrap(err, "failed to get EdgeConnectionConfig") + } + if err := MarshalEdgeConnectionConfig(edgeConnectionConfig, p.EdgeConnectionConfig); err != nil { + return errors.Wrap(err, "MarshalEdgeConnectionConfig error") + } + + if err := marshalDoHProxyConfigs(s, p.DoHProxyConfigs); err != nil { + return errors.Wrap(err, "marshalDoHProxyConfigs error") + } + if err := marshalReverseProxyConfigs(s, p.ReverseProxyConfigs); err != nil { + return errors.Wrap(err, "marshalReverseProxyConfigs error") + } + return nil +} + +func MarshalSupervisorConfig(s tunnelrpc.SupervisorConfig, p *SupervisorConfig) error { + if err := pogs.Insert(tunnelrpc.SupervisorConfig_TypeID, s.Struct, p); err != nil { + return errors.Wrap(err, "failed to insert SupervisorConfig") + } + return nil +} + +func MarshalEdgeConnectionConfig(s tunnelrpc.EdgeConnectionConfig, p *EdgeConnectionConfig) error { + if err := pogs.Insert(tunnelrpc.EdgeConnectionConfig_TypeID, s.Struct, p); err != nil { + return errors.Wrap(err, "failed to insert EdgeConnectionConfig") + } + return nil } func marshalDoHProxyConfigs(s tunnelrpc.ClientConfig, dohProxyConfigs []*DoHProxyConfig) error { @@ -265,23 +323,48 @@ func marshalReverseProxyConfigs(s tunnelrpc.ClientConfig, reverseProxyConfigs [] func UnmarshalClientConfig(s tunnelrpc.ClientConfig) (*ClientConfig, error) { p := new(ClientConfig) - p.Version = s.Version() - p.AutoUpdateFrequency = time.Duration(s.AutoUpdateFrequency()) - p.MetricsUpdateFrequency = time.Duration(s.MetricsUpdateFrequency()) - p.HeartbeatInterval = time.Duration(s.HeartbeatInterval()) - p.MaxFailedHeartbeats = s.MaxFailedHeartbeats() - p.GracePeriod = time.Duration(s.GracePeriod()) - p.NumHAConnections = s.NumHAConnections() - dohProxyConfigs, err := unmarshalDoHProxyConfigs(s) + p.Version = Version(s.Version()) + + supervisorConfig, err := s.SupervisorConfig() if err != nil { - return nil, err + return nil, errors.Wrap(err, "failed to get SupervisorConfig") } - p.DoHProxyConfigs = dohProxyConfigs - reverseProxyConfigs, err := unmarshalReverseProxyConfigs(s) + p.SupervisorConfig, err = UnmarshalSupervisorConfig(supervisorConfig) if err != nil { - return nil, err + return nil, errors.Wrap(err, "UnmarshalSupervisorConfig error") } - p.ReverseProxyConfigs = reverseProxyConfigs + + edgeConnectionConfig, err := s.EdgeConnectionConfig() + if err != nil { + return nil, errors.Wrap(err, "failed to get ConnectionManagerConfig") + } + p.EdgeConnectionConfig, err = UnmarshalEdgeConnectionConfig(edgeConnectionConfig) + if err != nil { + return nil, errors.Wrap(err, "UnmarshalConnectionManagerConfig error") + } + + p.DoHProxyConfigs, err = unmarshalDoHProxyConfigs(s) + if err != nil { + return nil, errors.Wrap(err, "unmarshalDoHProxyConfigs error") + } + + p.ReverseProxyConfigs, err = unmarshalReverseProxyConfigs(s) + if err != nil { + return nil, errors.Wrap(err, "unmarshalReverseProxyConfigs error") + } + + return p, nil +} + +func UnmarshalSupervisorConfig(s tunnelrpc.SupervisorConfig) (*SupervisorConfig, error) { + p := new(SupervisorConfig) + err := pogs.Extract(p, tunnelrpc.SupervisorConfig_TypeID, s.Struct) + 
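// pogs.Extract populates p's exported fields from the capnp SupervisorConfig struct by matching field names against the schema, so no per-field unmarshalling is needed here.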
return p, err +} + +func UnmarshalEdgeConnectionConfig(s tunnelrpc.EdgeConnectionConfig) (*EdgeConnectionConfig, error) { + p := new(EdgeConnectionConfig) + err := pogs.Extract(p, tunnelrpc.EdgeConnectionConfig_TypeID, s.Struct) return p, err } @@ -320,13 +403,38 @@ func unmarshalReverseProxyConfigs(s tunnelrpc.ClientConfig) ([]*ReverseProxyConf } func MarshalUseConfigurationResult(s tunnelrpc.UseConfigurationResult, p *UseConfigurationResult) error { - return pogs.Insert(tunnelrpc.UseConfigurationResult_TypeID, s.Struct, p) + capnpList, err := s.NewFailedConfigs(int32(len(p.FailedConfigs))) + if err != nil { + return errors.Wrap(err, "Cannot create new FailedConfigs") + } + for i, unmarshalledFailedConfig := range p.FailedConfigs { + err := MarshalFailedConfig(capnpList.At(i), unmarshalledFailedConfig) + if err != nil { + return errors.Wrapf(err, "Cannot MarshalFailedConfig at index %d", i) + } + } + s.SetSuccess(p.Success) + return nil } func UnmarshalUseConfigurationResult(s tunnelrpc.UseConfigurationResult) (*UseConfigurationResult, error) { p := new(UseConfigurationResult) - err := pogs.Extract(p, tunnelrpc.UseConfigurationResult_TypeID, s.Struct) - return p, err + var failedConfigs []*FailedConfig + marshalledFailedConfigs, err := s.FailedConfigs() + if err != nil { + return nil, errors.Wrap(err, "Cannot get FailedConfigs") + } + for i := 0; i < marshalledFailedConfigs.Len(); i++ { + ss := marshalledFailedConfigs.At(i) + failedConfig, err := UnmarshalFailedConfig(ss) + if err != nil { + return nil, errors.Wrapf(err, "Cannot UnmarshalFailedConfig at index %d", i) + } + failedConfigs = append(failedConfigs, failedConfig) + } + p.FailedConfigs = failedConfigs + p.Success = s.Success() + return p, nil } func MarshalDoHProxyConfig(s tunnelrpc.DoHProxyConfig, p *DoHProxyConfig) error { @@ -340,7 +448,7 @@ func UnmarshalDoHProxyConfig(s tunnelrpc.DoHProxyConfig) (*DoHProxyConfig, error } func MarshalReverseProxyConfig(s tunnelrpc.ReverseProxyConfig, p *ReverseProxyConfig) error { - s.SetTunnelHostname(p.TunnelHostname) + s.SetTunnelHostname(p.TunnelHostname.String()) switch config := p.Origin.(type) { case *HTTPOriginConfig: ss, err := s.Origin().NewHttp() @@ -381,7 +489,7 @@ func UnmarshalReverseProxyConfig(s tunnelrpc.ReverseProxyConfig) (*ReverseProxyC if err != nil { return nil, err } - p.TunnelHostname = tunnelHostname + p.TunnelHostname = h2mux.TunnelHostname(tunnelHostname) switch s.Origin().Which() { case tunnelrpc.ReverseProxyConfig_origin_Which_http: ss, err := s.Origin().Http() @@ -421,115 +529,12 @@ func UnmarshalReverseProxyConfig(s tunnelrpc.ReverseProxyConfig) (*ReverseProxyC } func MarshalHTTPOriginConfig(s tunnelrpc.HTTPOriginConfig, p *HTTPOriginConfig) error { - switch originAddr := p.URL.(type) { - case *HTTPURL: - ss, err := s.OriginAddr().NewHttp() - if err != nil { - return err - } - if err := MarshalHTTPURL(ss, originAddr); err != nil { - return err - } - case *UnixPath: - ss, err := s.OriginAddr().NewUnix() - if err != nil { - return err - } - if err := MarshalUnixPath(ss, originAddr); err != nil { - return err - } - default: - return fmt.Errorf("Unknown type for OriginAddr: %T", originAddr) - } - s.SetTcpKeepAlive(p.TCPKeepAlive.Nanoseconds()) - s.SetDialDualStack(p.DialDualStack) - s.SetTlsHandshakeTimeout(p.TLSHandshakeTimeout.Nanoseconds()) - s.SetTlsVerify(p.TLSVerify) - s.SetOriginCAPool(p.OriginCAPool) - s.SetOriginServerName(p.OriginServerName) - s.SetMaxIdleConnections(p.MaxIdleConnections) - s.SetIdleConnectionTimeout(p.IdleConnectionTimeout.Nanoseconds()) 
- s.SetProxyConnectionTimeout(p.ProxyConnectTimeout.Nanoseconds()) - s.SetExpectContinueTimeout(p.ExpectContinueTimeout.Nanoseconds()) - s.SetChunkedEncoding(p.ChunkedEncoding) - return nil + return pogs.Insert(tunnelrpc.HTTPOriginConfig_TypeID, s.Struct, p) } func UnmarshalHTTPOriginConfig(s tunnelrpc.HTTPOriginConfig) (*HTTPOriginConfig, error) { p := new(HTTPOriginConfig) - switch s.OriginAddr().Which() { - case tunnelrpc.HTTPOriginConfig_originAddr_Which_http: - ss, err := s.OriginAddr().Http() - if err != nil { - return nil, err - } - originAddr, err := UnmarshalCapnpHTTPURL(ss) - if err != nil { - return nil, err - } - p.URL = originAddr - case tunnelrpc.HTTPOriginConfig_originAddr_Which_unix: - ss, err := s.OriginAddr().Unix() - if err != nil { - return nil, err - } - originAddr, err := UnmarshalUnixPath(ss) - if err != nil { - return nil, err - } - p.URL = originAddr - default: - return nil, fmt.Errorf("Unknown type for OriginAddr: %T", s.OriginAddr().Which()) - } - p.TCPKeepAlive = time.Duration(s.TcpKeepAlive()) - p.DialDualStack = s.DialDualStack() - p.TLSHandshakeTimeout = time.Duration(s.TlsHandshakeTimeout()) - p.TLSVerify = s.TlsVerify() - originCAPool, err := s.OriginCAPool() - if err != nil { - return nil, err - } - p.OriginCAPool = originCAPool - originServerName, err := s.OriginServerName() - if err != nil { - return nil, err - } - p.OriginServerName = originServerName - p.MaxIdleConnections = s.MaxIdleConnections() - p.IdleConnectionTimeout = time.Duration(s.IdleConnectionTimeout()) - p.ProxyConnectTimeout = time.Duration(s.ProxyConnectionTimeout()) - p.ExpectContinueTimeout = time.Duration(s.ExpectContinueTimeout()) - p.ChunkedEncoding = s.ChunkedEncoding() - return p, nil -} - -func MarshalHTTPURL(s tunnelrpc.CapnpHTTPURL, p *HTTPURL) error { - return pogs.Insert(tunnelrpc.CapnpHTTPURL_TypeID, s.Struct, p.capnpHTTPURL()) -} - -func UnmarshalCapnpHTTPURL(s tunnelrpc.CapnpHTTPURL) (*HTTPURL, error) { - p := new(CapnpHTTPURL) - err := pogs.Extract(p, tunnelrpc.CapnpHTTPURL_TypeID, s.Struct) - if err != nil { - return nil, err - } - url, err := url.Parse(p.URL) - if err != nil { - return nil, err - } - return &HTTPURL{ - URL: url, - }, nil -} - -func MarshalUnixPath(s tunnelrpc.UnixPath, p *UnixPath) error { - err := pogs.Insert(tunnelrpc.UnixPath_TypeID, s.Struct, p) - return err -} - -func UnmarshalUnixPath(s tunnelrpc.UnixPath) (*UnixPath, error) { - p := new(UnixPath) - err := pogs.Extract(p, tunnelrpc.UnixPath_TypeID, s.Struct) + err := pogs.Extract(p, tunnelrpc.HTTPOriginConfig_TypeID, s.Struct) return p, err } @@ -584,3 +589,141 @@ func (c *ClientService_PogsClient) UseConfiguration( } return UnmarshalUseConfigurationResult(retval) } + +func ClientService_ServerToClient(s ClientService) tunnelrpc.ClientService { + return tunnelrpc.ClientService_ServerToClient(ClientService_PogsImpl{s}) +} + +type ClientService_PogsImpl struct { + impl ClientService +} + +func (i ClientService_PogsImpl) UseConfiguration(p tunnelrpc.ClientService_useConfiguration) error { + config, err := p.Params.ClientServiceConfig() + if err != nil { + return errors.Wrap(err, "Cannot get CloudflaredConfig parameter") + } + pogsConfig, err := UnmarshalClientConfig(config) + if err != nil { + return errors.Wrap(err, "Cannot unmarshal tunnelrpc.CloudflaredConfig to *CloudflaredConfig") + } + server.Ack(p.Options) + userConfigResult, err := i.impl.UseConfiguration(p.Ctx, pogsConfig) + if err != nil { + return err + } + result, err := p.Results.NewResult() + if err != nil { + return err + } + return 
MarshalUseConfigurationResult(result, userConfigResult) +} + +type UseConfigurationResult struct { + Success bool + FailedConfigs []*FailedConfig +} + +type FailedConfig struct { + Config FallibleConfig + Reason string +} + +func MarshalFailedConfig(s tunnelrpc.FailedConfig, p *FailedConfig) error { + switch config := p.Config.(type) { + case *SupervisorConfig: + ss, err := s.Config().NewSupervisor() + if err != nil { + return err + } + err = MarshalSupervisorConfig(ss, config) + if err != nil { + return err + } + case *EdgeConnectionConfig: + ss, err := s.Config().EdgeConnection() + if err != nil { + return err + } + err = MarshalEdgeConnectionConfig(ss, config) + if err != nil { + return err + } + case *DoHProxyConfig: + ss, err := s.Config().NewDoh() + if err != nil { + return err + } + err = MarshalDoHProxyConfig(ss, config) + if err != nil { + return err + } + case *ReverseProxyConfig: + ss, err := s.Config().NewReverseProxy() + if err != nil { + return err + } + err = MarshalReverseProxyConfig(ss, config) + if err != nil { + return err + } + default: + return fmt.Errorf("Unknown type for Config: %T", config) + } + s.SetReason(p.Reason) + return nil +} + +func UnmarshalFailedConfig(s tunnelrpc.FailedConfig) (*FailedConfig, error) { + p := new(FailedConfig) + switch s.Config().Which() { + case tunnelrpc.FailedConfig_config_Which_supervisor: + ss, err := s.Config().Supervisor() + if err != nil { + return nil, errors.Wrap(err, "Cannot get SupervisorConfig from Config") + } + config, err := UnmarshalSupervisorConfig(ss) + if err != nil { + return nil, errors.Wrap(err, "Cannot UnmarshalSupervisorConfig") + } + p.Config = config + case tunnelrpc.FailedConfig_config_Which_edgeConnection: + ss, err := s.Config().EdgeConnection() + if err != nil { + return nil, errors.Wrap(err, "Cannot get ConnectionManager from Config") + } + config, err := UnmarshalEdgeConnectionConfig(ss) + if err != nil { + return nil, errors.Wrap(err, "Cannot UnmarshalConnectionManagerConfig") + } + p.Config = config + case tunnelrpc.FailedConfig_config_Which_doh: + ss, err := s.Config().Doh() + if err != nil { + return nil, errors.Wrap(err, "Cannot get Doh from Config") + } + config, err := UnmarshalDoHProxyConfig(ss) + if err != nil { + return nil, errors.Wrap(err, "Cannot UnmarshalDoHProxyConfig") + } + p.Config = config + case tunnelrpc.FailedConfig_config_Which_reverseProxy: + ss, err := s.Config().ReverseProxy() + if err != nil { + return nil, errors.Wrap(err, "Cannot get ReverseProxy from Config") + } + config, err := UnmarshalReverseProxyConfig(ss) + if err != nil { + return nil, errors.Wrap(err, "Cannot UnmarshalReverseProxyConfig") + } + p.Config = config + default: + return nil, fmt.Errorf("Unknown type for FailedConfig: %v", s.Config().Which()) + } + reason, err := s.Reason() + if err != nil { + return nil, errors.Wrap(err, "Cannot get Reason") + } + p.Reason = reason + return p, nil +} diff --git a/tunnelrpc/pogs/config_test.go b/tunnelrpc/pogs/config_test.go index c958449c..bb1299a5 100644 --- a/tunnelrpc/pogs/config_test.go +++ b/tunnelrpc/pogs/config_test.go @@ -2,7 +2,6 @@ package pogs import ( "fmt" - "net/url" "reflect" "testing" "time" @@ -13,6 +12,14 @@ import ( capnp "zombiezen.com/go/capnproto2" ) +func TestVersion(t *testing.T) { + firstVersion := InitVersion() + secondVersion := Version(1) + assert.False(t, firstVersion.IsNewerOrEqual(secondVersion)) + assert.True(t, secondVersion.IsNewerOrEqual(firstVersion)) + assert.True(t, secondVersion.IsNewerOrEqual(secondVersion)) +} + func 
TestClientConfig(t *testing.T) { addDoHProxyConfigs := func(c *ClientConfig) { c.DoHProxyConfigs = []*DoHProxyConfig{ @@ -66,8 +73,17 @@ func TestUseConfigurationResult(t *testing.T) { Success: true, }, &UseConfigurationResult{ - Success: false, - ErrorMessage: "the quick brown fox jumped over the lazy dogs", + Success: false, + FailedConfigs: []*FailedConfig{ + { + Config: sampleReverseProxyConfig(), + Reason: "Invalid certificate", + }, + { + Config: sampleDoHProxyConfig(), + Reason: "Cannot listen on port 53", + }, + }, }, } for i, testCase := range testCases { @@ -188,18 +204,41 @@ func TestWebSocketOriginConfig(t *testing.T) { } } +func TestOriginConfigInvalidURL(t *testing.T) { + invalidConfigs := []OriginConfig{ + &HTTPOriginConfig{ + // this url doesn't have a scheme + URLString: "127.0.0.1:36192", + }, + &WebSocketOriginConfig{ + URLString: "127.0.0.1:36192", + }, + } + + for _, config := range invalidConfigs { + service, err := config.Service() + assert.Error(t, err) + assert.Nil(t, service) + } +} + ////////////////////////////////////////////////////////////////////////////// // Functions to generate sample data for ease of testing func sampleClientConfig(overrides ...func(*ClientConfig)) *ClientConfig { sample := &ClientConfig{ - Version: uint64(1337), - AutoUpdateFrequency: 21 * time.Hour, - MetricsUpdateFrequency: 11 * time.Minute, - HeartbeatInterval: 5 * time.Second, - MaxFailedHeartbeats: 9001, - GracePeriod: 31 * time.Second, - NumHAConnections: 49, + Version: Version(1337), + SupervisorConfig: &SupervisorConfig{ + AutoUpdateFrequency: 21 * time.Hour, + MetricsUpdateFrequency: 11 * time.Minute, + GracePeriod: 31 * time.Second, + }, + EdgeConnectionConfig: &EdgeConnectionConfig{ + NumHAConnections: 49, + Timeout: 9 * time.Second, + HeartbeatInterval: 5 * time.Second, + MaxFailedHeartbeats: 9001, + }, } sample.ensureNoZeroFields() for _, f := range overrides { @@ -238,23 +277,18 @@ func sampleReverseProxyConfig(overrides ...func(*ReverseProxyConfig)) *ReversePr func sampleHTTPOriginConfig(overrides ...func(*HTTPOriginConfig)) *HTTPOriginConfig { sample := &HTTPOriginConfig{ - URL: &HTTPURL{ - URL: &url.URL{ - Scheme: "https", - Host: "example.com", - }, - }, - TCPKeepAlive: 7 * time.Second, - DialDualStack: true, - TLSHandshakeTimeout: 11 * time.Second, - TLSVerify: true, - OriginCAPool: "/etc/cert.pem", - OriginServerName: "secure.example.com", - MaxIdleConnections: 19, - IdleConnectionTimeout: 17 * time.Second, - ProxyConnectTimeout: 15 * time.Second, - ExpectContinueTimeout: 21 * time.Second, - ChunkedEncoding: true, + URLString: "https.example.com", + TCPKeepAlive: 7 * time.Second, + DialDualStack: true, + TLSHandshakeTimeout: 11 * time.Second, + TLSVerify: true, + OriginCAPool: "/etc/cert.pem", + OriginServerName: "secure.example.com", + MaxIdleConnections: 19, + IdleConnectionTimeout: 17 * time.Second, + ProxyConnectionTimeout: 15 * time.Second, + ExpectContinueTimeout: 21 * time.Second, + ChunkedEncoding: true, } sample.ensureNoZeroFields() for _, f := range overrides { @@ -265,20 +299,18 @@ func sampleHTTPOriginConfig(overrides ...func(*HTTPOriginConfig)) *HTTPOriginCon func sampleHTTPOriginUnixPathConfig(overrides ...func(*HTTPOriginConfig)) *HTTPOriginConfig { sample := &HTTPOriginConfig{ - URL: &UnixPath{ - Path: "/var/lib/file.sock", - }, - TCPKeepAlive: 7 * time.Second, - DialDualStack: true, - TLSHandshakeTimeout: 11 * time.Second, - TLSVerify: true, - OriginCAPool: "/etc/cert.pem", - OriginServerName: "secure.example.com", - MaxIdleConnections: 19, - 
IdleConnectionTimeout: 17 * time.Second, - ProxyConnectTimeout: 15 * time.Second, - ExpectContinueTimeout: 21 * time.Second, - ChunkedEncoding: true, + URLString: "unix:/var/lib/file.sock", + TCPKeepAlive: 7 * time.Second, + DialDualStack: true, + TLSHandshakeTimeout: 11 * time.Second, + TLSVerify: true, + OriginCAPool: "/etc/cert.pem", + OriginServerName: "secure.example.com", + MaxIdleConnections: 19, + IdleConnectionTimeout: 17 * time.Second, + ProxyConnectionTimeout: 15 * time.Second, + ExpectContinueTimeout: 21 * time.Second, + ChunkedEncoding: true, } sample.ensureNoZeroFields() for _, f := range overrides { @@ -289,7 +321,7 @@ func sampleHTTPOriginUnixPathConfig(overrides ...func(*HTTPOriginConfig)) *HTTPO func sampleWebSocketOriginConfig(overrides ...func(*WebSocketOriginConfig)) *WebSocketOriginConfig { sample := &WebSocketOriginConfig{ - URL: "ssh://example.com", + URLString: "ssh://example.com", TLSVerify: true, OriginCAPool: "/etc/cert.pem", OriginServerName: "secure.example.com", diff --git a/tunnelrpc/tunnelrpc.capnp b/tunnelrpc/tunnelrpc.capnp index 5189b747..5d178278 100644 --- a/tunnelrpc/tunnelrpc.capnp +++ b/tunnelrpc/tunnelrpc.capnp @@ -77,35 +77,48 @@ struct ClientConfig { # to monotonically increase in value. Any configuration supplied to # useConfiguration() with a smaller `version` should be ignored. version @0 :UInt64; + # supervisorConfig is configuration for supervisor, the component that manages connection manager, + # autoupdater and metrics server + supervisorConfig @1 :SupervisorConfig; + # edgeConnectionConfig is configuration for connection manager, the componenet that manages connections with the edge + edgeConnectionConfig @2 :EdgeConnectionConfig; + # Configuration for cloudflared to run as a DNS-over-HTTPS proxy. + # cloudflared CLI option: `proxy-dns` + dohProxyConfigs @3 :List(DoHProxyConfig); + # Configuration for cloudflared to run as an HTTP reverse proxy. + reverseProxyConfigs @4 :List(ReverseProxyConfig); +} + +struct SupervisorConfig { # Frequency (in ns) to check Equinox for updates. # Zero means auto-update is disabled. # cloudflared CLI option: `autoupdate-freq` - autoUpdateFrequency @1 :Int64; + autoUpdateFrequency @0 :Int64; # Frequency (in ns) to update connection-based metrics. # cloudflared CLI option: `metrics-update-freq` - metricsUpdateFrequency @2 :Int64; - # interval (in ns) between heartbeats with the Cloudflare edge - # cloudflared CLI option: `heartbeat-interval` - heartbeatInterval @3 :Int64; - # Minimum number of unacked heartbeats for cloudflared to send before - # closing the connection to the edge. - # cloudflared CLI option: `heartbeat-count` - maxFailedHeartbeats @4 :UInt64; + metricsUpdateFrequency @1 :Int64; # Time (in ns) to continue serving requests after cloudflared receives its # first SIGINT/SIGTERM. A second SIGINT/SIGTERM will force cloudflared to # shutdown immediately. For example, this field can be used to gracefully # transition traffic to another cloudflared instance. # cloudflared CLI option: `grace-period` - gracePeriod @5 :Int64; - # Configuration for cloudflared to run as a DNS-over-HTTPS proxy. - # cloudflared CLI option: `proxy-dns` - dohProxyConfigs @6 :List(DoHProxyConfig); - # Configuration for cloudflared to run as an HTTP reverse proxy. - reverseProxyConfigs @7 :List(ReverseProxyConfig); - # Number of persistent connections to keep open between cloudflared and - # the edge. 
+ gracePeriod @2 :Int64; +} + +struct EdgeConnectionConfig { # cloudflared CLI option: `ha-connections` - numHAConnections @8 :UInt8; + numHAConnections @0 :UInt8; + # Interval (in ns) between heartbeats with the Cloudflare edge + # cloudflared CLI option: `heartbeat-interval` + heartbeatInterval @1 :Int64; + # Maximum wait time to connect with the edge. + timeout @2 :Int64; + # Number of unacked heartbeats for cloudflared to send before + # closing the connection to the edge. + # cloudflared CLI option: `heartbeat-count` + maxFailedHeartbeats @3 :UInt64; + # Absolute path of the file containing certificate and token to connect with the edge + userCredentialPath @4 :Text; } struct ReverseProxyConfig { @@ -134,7 +147,7 @@ struct WebSocketOriginConfig { # cloudflared will start a websocket server that forwards data to this URI # cloudflared CLI option: `url` # cloudflared logic: https://github.com/cloudflare/cloudflared/blob/2019.3.2/cmd/cloudflared/tunnel/cmd.go#L304 - url @0 :Text; + urlString @0 :Text; # Whether cloudflared should verify TLS connections to the origin. # negation of cloudflared CLI option: `no-tls-verify` tlsVerify @1 :Bool; @@ -155,25 +168,22 @@ struct WebSocketOriginConfig { struct HTTPOriginConfig { # HTTP(S) URL of the origin service. # cloudflared CLI option: `url` - originAddr :union { - http @0 :CapnpHTTPURL; - unix @1 :UnixPath; - } + urlString @0 :Text; # the TCP keep-alive period (in ns) for an active network connection. # Zero means keep-alives are not enabled. # cloudflared CLI option: `proxy-tcp-keepalive` - tcpKeepAlive @2 :Int64; + tcpKeepAlive @1 :Int64; # whether cloudflared should use a "happy eyeballs"-compliant procedure # to connect to origins that resolve to both IPv4 and IPv6 addresses # negation of cloudflared CLI option: `proxy-no-happy-eyeballs` - dialDualStack @3 :Bool; + dialDualStack @2 :Bool; # maximum time (in ns) for cloudflared to wait for a TLS handshake # with the origin. Zero means no timeout. # cloudflared CLI option: `proxy-tls-timeout` - tlsHandshakeTimeout @4 :Int64; + tlsHandshakeTimeout @3 :Int64; # Whether cloudflared should verify TLS connections to the origin. # negation of cloudflared CLI option: `no-tls-verify` - tlsVerify @5 :Bool; + tlsVerify @4 :Bool; # originCAPool specifies the root CA that cloudflared should use when # verifying TLS connections to the origin. # - if tlsVerify is false, originCAPool will be ignored. @@ -182,39 +192,29 @@ struct HTTPOriginConfig { # - if tlsVerify is true and originCAPool is non-empty, cloudflared will # treat it as the filepath to the root CA. # cloudflared CLI option: `origin-ca-pool` - originCAPool @6 :Text; + originCAPool @5 :Text; # Hostname to use when verifying TLS connections to the origin. # cloudflared CLI option: `origin-server-name` - originServerName @7 :Text; + originServerName @6 :Text; # maximum number of idle (keep-alive) connections for cloudflared to # keep open with the origin. Zero means no limit. # cloudflared CLI option: `proxy-keepalive-connections` - maxIdleConnections @8 :UInt64; + maxIdleConnections @7 :UInt64; # maximum time (in ns) for an idle (keep-alive) connection to remain # idle before closing itself. Zero means no timeout. # cloudflared CLI option: `proxy-keepalive-timeout` - idleConnectionTimeout @9 :Int64; + idleConnectionTimeout @8 :Int64; # maximum amount of time a dial will wait for a connect to complete. 
- proxyConnectionTimeout @10 :Int64; + proxyConnectionTimeout @9 :Int64; # The amount of time to wait for origin's first response headers after fully # writing the request headers if the request has an "Expect: 100-continue" header. # Zero means no timeout and causes the body to be sent immediately, without # waiting for the server to approve. - expectContinueTimeout @11 :Int64; + expectContinueTimeout @10 :Int64; # Whether cloudflared should allow chunked transfer encoding to the # origin. (This should be disabled for WSGI origins, for example.) # negation of cloudflared CLI option: `no-chunked-encoding` - chunkedEncoding @12 :Bool; -} - -# URL for a HTTP origin, capnp doesn't have native support for URL, so represent it as Text -struct CapnpHTTPURL { - url @0: Text; -} - -# Path to a unix socket -struct UnixPath { - path @0: Text; + chunkedEncoding @11 :Bool; } # configuration for cloudflared to provide a DNS over HTTPS proxy server @@ -251,7 +251,17 @@ struct ServerInfo { struct UseConfigurationResult { success @0 :Bool; - errorMessage @1 :Text; + failedConfigs @1 :List(FailedConfig); +} + +struct FailedConfig { + config :union { + supervisor @0 :SupervisorConfig; + edgeConnection @1 :EdgeConnectionConfig; + doh @2 :DoHProxyConfig; + reverseProxy @3 :ReverseProxyConfig; + } + reason @4 :Text; } interface TunnelServer { diff --git a/tunnelrpc/tunnelrpc.capnp.go b/tunnelrpc/tunnelrpc.capnp.go index 486335df..14b38c8b 100644 --- a/tunnelrpc/tunnelrpc.capnp.go +++ b/tunnelrpc/tunnelrpc.capnp.go @@ -3,9 +3,8 @@ package tunnelrpc import ( - strconv "strconv" - context "golang.org/x/net/context" + strconv "strconv" capnp "zombiezen.com/go/capnproto2" text "zombiezen.com/go/capnproto2/encoding/text" schemas "zombiezen.com/go/capnproto2/schemas" @@ -107,6 +106,11 @@ func (s Authentication_List) At(i int) Authentication { return Authentication{s. func (s Authentication_List) Set(i int, v Authentication) error { return s.List.SetStruct(i, v.Struct) } +func (s Authentication_List) String() string { + str, _ := text.MarshalList(0xc082ef6e0d42ed1d, s.List) + return str +} + // Authentication_Promise is a wrapper for a Authentication promised by a client call. type Authentication_Promise struct{ *capnp.Pipeline } @@ -247,6 +251,11 @@ func (s TunnelRegistration_List) Set(i int, v TunnelRegistration) error { return s.List.SetStruct(i, v.Struct) } +func (s TunnelRegistration_List) String() string { + str, _ := text.MarshalList(0xf41a0f001ad49e46, s.List) + return str +} + // TunnelRegistration_Promise is a wrapper for a TunnelRegistration promised by a client call. type TunnelRegistration_Promise struct{ *capnp.Pipeline } @@ -476,6 +485,11 @@ func (s RegistrationOptions_List) Set(i int, v RegistrationOptions) error { return s.List.SetStruct(i, v.Struct) } +func (s RegistrationOptions_List) String() string { + str, _ := text.MarshalList(0xc793e50592935b4a, s.List) + return str +} + // RegistrationOptions_Promise is a wrapper for a RegistrationOptions promised by a client call. type RegistrationOptions_Promise struct{ *capnp.Pipeline } @@ -606,6 +620,11 @@ func (s CapnpConnectParameters_List) Set(i int, v CapnpConnectParameters) error return s.List.SetStruct(i, v.Struct) } +func (s CapnpConnectParameters_List) String() string { + str, _ := text.MarshalList(0xa78f37418c1077c8, s.List) + return str +} + // CapnpConnectParameters_Promise is a wrapper for a CapnpConnectParameters promised by a client call. 
type CapnpConnectParameters_Promise struct{ *capnp.Pipeline } @@ -702,6 +721,11 @@ func (s ConnectResult_List) At(i int) ConnectResult { return ConnectResult{s.Lis func (s ConnectResult_List) Set(i int, v ConnectResult) error { return s.List.SetStruct(i, v.Struct) } +func (s ConnectResult_List) String() string { + str, _ := text.MarshalList(0xff8d9848747c956a, s.List) + return str +} + // ConnectResult_Promise is a wrapper for a ConnectResult promised by a client call. type ConnectResult_Promise struct{ *capnp.Pipeline } @@ -791,6 +815,11 @@ func (s ConnectError_List) At(i int) ConnectError { return ConnectError{s.List.S func (s ConnectError_List) Set(i int, v ConnectError) error { return s.List.SetStruct(i, v.Struct) } +func (s ConnectError_List) String() string { + str, _ := text.MarshalList(0xb14ce48f4e2abb0d, s.List) + return str +} + // ConnectError_Promise is a wrapper for a ConnectError promised by a client call. type ConnectError_Promise struct{ *capnp.Pipeline } @@ -805,12 +834,12 @@ type ClientConfig struct{ capnp.Struct } const ClientConfig_TypeID = 0xf0a143f1c95a678e func NewClientConfig(s *capnp.Segment) (ClientConfig, error) { - st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 56, PointerCount: 2}) + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 4}) return ClientConfig{st}, err } func NewRootClientConfig(s *capnp.Segment) (ClientConfig, error) { - st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 56, PointerCount: 2}) + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 4}) return ClientConfig{st}, err } @@ -832,58 +861,68 @@ func (s ClientConfig) SetVersion(v uint64) { s.Struct.SetUint64(0, v) } -func (s ClientConfig) AutoUpdateFrequency() int64 { - return int64(s.Struct.Uint64(8)) -} - -func (s ClientConfig) SetAutoUpdateFrequency(v int64) { - s.Struct.SetUint64(8, uint64(v)) -} - -func (s ClientConfig) MetricsUpdateFrequency() int64 { - return int64(s.Struct.Uint64(16)) -} - -func (s ClientConfig) SetMetricsUpdateFrequency(v int64) { - s.Struct.SetUint64(16, uint64(v)) -} - -func (s ClientConfig) HeartbeatInterval() int64 { - return int64(s.Struct.Uint64(24)) -} - -func (s ClientConfig) SetHeartbeatInterval(v int64) { - s.Struct.SetUint64(24, uint64(v)) -} - -func (s ClientConfig) MaxFailedHeartbeats() uint64 { - return s.Struct.Uint64(32) -} - -func (s ClientConfig) SetMaxFailedHeartbeats(v uint64) { - s.Struct.SetUint64(32, v) -} - -func (s ClientConfig) GracePeriod() int64 { - return int64(s.Struct.Uint64(40)) -} - -func (s ClientConfig) SetGracePeriod(v int64) { - s.Struct.SetUint64(40, uint64(v)) -} - -func (s ClientConfig) DohProxyConfigs() (DoHProxyConfig_List, error) { +func (s ClientConfig) SupervisorConfig() (SupervisorConfig, error) { p, err := s.Struct.Ptr(0) - return DoHProxyConfig_List{List: p.List()}, err + return SupervisorConfig{Struct: p.Struct()}, err } -func (s ClientConfig) HasDohProxyConfigs() bool { +func (s ClientConfig) HasSupervisorConfig() bool { p, err := s.Struct.Ptr(0) return p.IsValid() || err != nil } +func (s ClientConfig) SetSupervisorConfig(v SupervisorConfig) error { + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewSupervisorConfig sets the supervisorConfig field to a newly +// allocated SupervisorConfig struct, preferring placement in s's segment. 
+func (s ClientConfig) NewSupervisorConfig() (SupervisorConfig, error) { + ss, err := NewSupervisorConfig(s.Struct.Segment()) + if err != nil { + return SupervisorConfig{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +func (s ClientConfig) EdgeConnectionConfig() (EdgeConnectionConfig, error) { + p, err := s.Struct.Ptr(1) + return EdgeConnectionConfig{Struct: p.Struct()}, err +} + +func (s ClientConfig) HasEdgeConnectionConfig() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s ClientConfig) SetEdgeConnectionConfig(v EdgeConnectionConfig) error { + return s.Struct.SetPtr(1, v.Struct.ToPtr()) +} + +// NewEdgeConnectionConfig sets the edgeConnectionConfig field to a newly +// allocated EdgeConnectionConfig struct, preferring placement in s's segment. +func (s ClientConfig) NewEdgeConnectionConfig() (EdgeConnectionConfig, error) { + ss, err := NewEdgeConnectionConfig(s.Struct.Segment()) + if err != nil { + return EdgeConnectionConfig{}, err + } + err = s.Struct.SetPtr(1, ss.Struct.ToPtr()) + return ss, err +} + +func (s ClientConfig) DohProxyConfigs() (DoHProxyConfig_List, error) { + p, err := s.Struct.Ptr(2) + return DoHProxyConfig_List{List: p.List()}, err +} + +func (s ClientConfig) HasDohProxyConfigs() bool { + p, err := s.Struct.Ptr(2) + return p.IsValid() || err != nil +} + func (s ClientConfig) SetDohProxyConfigs(v DoHProxyConfig_List) error { - return s.Struct.SetPtr(0, v.List.ToPtr()) + return s.Struct.SetPtr(2, v.List.ToPtr()) } // NewDohProxyConfigs sets the dohProxyConfigs field to a newly @@ -893,22 +932,22 @@ func (s ClientConfig) NewDohProxyConfigs(n int32) (DoHProxyConfig_List, error) { if err != nil { return DoHProxyConfig_List{}, err } - err = s.Struct.SetPtr(0, l.List.ToPtr()) + err = s.Struct.SetPtr(2, l.List.ToPtr()) return l, err } func (s ClientConfig) ReverseProxyConfigs() (ReverseProxyConfig_List, error) { - p, err := s.Struct.Ptr(1) + p, err := s.Struct.Ptr(3) return ReverseProxyConfig_List{List: p.List()}, err } func (s ClientConfig) HasReverseProxyConfigs() bool { - p, err := s.Struct.Ptr(1) + p, err := s.Struct.Ptr(3) return p.IsValid() || err != nil } func (s ClientConfig) SetReverseProxyConfigs(v ReverseProxyConfig_List) error { - return s.Struct.SetPtr(1, v.List.ToPtr()) + return s.Struct.SetPtr(3, v.List.ToPtr()) } // NewReverseProxyConfigs sets the reverseProxyConfigs field to a newly @@ -918,24 +957,16 @@ func (s ClientConfig) NewReverseProxyConfigs(n int32) (ReverseProxyConfig_List, if err != nil { return ReverseProxyConfig_List{}, err } - err = s.Struct.SetPtr(1, l.List.ToPtr()) + err = s.Struct.SetPtr(3, l.List.ToPtr()) return l, err } -func (s ClientConfig) NumHAConnections() uint8 { - return s.Struct.Uint8(48) -} - -func (s ClientConfig) SetNumHAConnections(v uint8) { - s.Struct.SetUint8(48, v) -} - // ClientConfig_List is a list of ClientConfig. type ClientConfig_List struct{ capnp.List } // NewClientConfig creates a new list of ClientConfig. 
func NewClientConfig_List(s *capnp.Segment, sz int32) (ClientConfig_List, error) { - l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 56, PointerCount: 2}, sz) + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 4}, sz) return ClientConfig_List{l}, err } @@ -943,6 +974,11 @@ func (s ClientConfig_List) At(i int) ClientConfig { return ClientConfig{s.List.S func (s ClientConfig_List) Set(i int, v ClientConfig) error { return s.List.SetStruct(i, v.Struct) } +func (s ClientConfig_List) String() string { + str, _ := text.MarshalList(0xf0a143f1c95a678e, s.List) + return str +} + // ClientConfig_Promise is a wrapper for a ClientConfig promised by a client call. type ClientConfig_Promise struct{ *capnp.Pipeline } @@ -951,6 +987,197 @@ func (p ClientConfig_Promise) Struct() (ClientConfig, error) { return ClientConfig{s}, err } +func (p ClientConfig_Promise) SupervisorConfig() SupervisorConfig_Promise { + return SupervisorConfig_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +func (p ClientConfig_Promise) EdgeConnectionConfig() EdgeConnectionConfig_Promise { + return EdgeConnectionConfig_Promise{Pipeline: p.Pipeline.GetPipeline(1)} +} + +type SupervisorConfig struct{ capnp.Struct } + +// SupervisorConfig_TypeID is the unique identifier for the type SupervisorConfig. +const SupervisorConfig_TypeID = 0xf7f49b3f779ae258 + +func NewSupervisorConfig(s *capnp.Segment) (SupervisorConfig, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 0}) + return SupervisorConfig{st}, err +} + +func NewRootSupervisorConfig(s *capnp.Segment) (SupervisorConfig, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 24, PointerCount: 0}) + return SupervisorConfig{st}, err +} + +func ReadRootSupervisorConfig(msg *capnp.Message) (SupervisorConfig, error) { + root, err := msg.RootPtr() + return SupervisorConfig{root.Struct()}, err +} + +func (s SupervisorConfig) String() string { + str, _ := text.Marshal(0xf7f49b3f779ae258, s.Struct) + return str +} + +func (s SupervisorConfig) AutoUpdateFrequency() int64 { + return int64(s.Struct.Uint64(0)) +} + +func (s SupervisorConfig) SetAutoUpdateFrequency(v int64) { + s.Struct.SetUint64(0, uint64(v)) +} + +func (s SupervisorConfig) MetricsUpdateFrequency() int64 { + return int64(s.Struct.Uint64(8)) +} + +func (s SupervisorConfig) SetMetricsUpdateFrequency(v int64) { + s.Struct.SetUint64(8, uint64(v)) +} + +func (s SupervisorConfig) GracePeriod() int64 { + return int64(s.Struct.Uint64(16)) +} + +func (s SupervisorConfig) SetGracePeriod(v int64) { + s.Struct.SetUint64(16, uint64(v)) +} + +// SupervisorConfig_List is a list of SupervisorConfig. +type SupervisorConfig_List struct{ capnp.List } + +// NewSupervisorConfig creates a new list of SupervisorConfig. +func NewSupervisorConfig_List(s *capnp.Segment, sz int32) (SupervisorConfig_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 24, PointerCount: 0}, sz) + return SupervisorConfig_List{l}, err +} + +func (s SupervisorConfig_List) At(i int) SupervisorConfig { return SupervisorConfig{s.List.Struct(i)} } + +func (s SupervisorConfig_List) Set(i int, v SupervisorConfig) error { + return s.List.SetStruct(i, v.Struct) +} + +func (s SupervisorConfig_List) String() string { + str, _ := text.MarshalList(0xf7f49b3f779ae258, s.List) + return str +} + +// SupervisorConfig_Promise is a wrapper for a SupervisorConfig promised by a client call. 
+type SupervisorConfig_Promise struct{ *capnp.Pipeline } + +func (p SupervisorConfig_Promise) Struct() (SupervisorConfig, error) { + s, err := p.Pipeline.Struct() + return SupervisorConfig{s}, err +} + +type EdgeConnectionConfig struct{ capnp.Struct } + +// EdgeConnectionConfig_TypeID is the unique identifier for the type EdgeConnectionConfig. +const EdgeConnectionConfig_TypeID = 0xc744e349009087aa + +func NewEdgeConnectionConfig(s *capnp.Segment) (EdgeConnectionConfig, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 32, PointerCount: 1}) + return EdgeConnectionConfig{st}, err +} + +func NewRootEdgeConnectionConfig(s *capnp.Segment) (EdgeConnectionConfig, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 32, PointerCount: 1}) + return EdgeConnectionConfig{st}, err +} + +func ReadRootEdgeConnectionConfig(msg *capnp.Message) (EdgeConnectionConfig, error) { + root, err := msg.RootPtr() + return EdgeConnectionConfig{root.Struct()}, err +} + +func (s EdgeConnectionConfig) String() string { + str, _ := text.Marshal(0xc744e349009087aa, s.Struct) + return str +} + +func (s EdgeConnectionConfig) NumHAConnections() uint8 { + return s.Struct.Uint8(0) +} + +func (s EdgeConnectionConfig) SetNumHAConnections(v uint8) { + s.Struct.SetUint8(0, v) +} + +func (s EdgeConnectionConfig) HeartbeatInterval() int64 { + return int64(s.Struct.Uint64(8)) +} + +func (s EdgeConnectionConfig) SetHeartbeatInterval(v int64) { + s.Struct.SetUint64(8, uint64(v)) +} + +func (s EdgeConnectionConfig) Timeout() int64 { + return int64(s.Struct.Uint64(16)) +} + +func (s EdgeConnectionConfig) SetTimeout(v int64) { + s.Struct.SetUint64(16, uint64(v)) +} + +func (s EdgeConnectionConfig) MaxFailedHeartbeats() uint64 { + return s.Struct.Uint64(24) +} + +func (s EdgeConnectionConfig) SetMaxFailedHeartbeats(v uint64) { + s.Struct.SetUint64(24, v) +} + +func (s EdgeConnectionConfig) UserCredentialPath() (string, error) { + p, err := s.Struct.Ptr(0) + return p.Text(), err +} + +func (s EdgeConnectionConfig) HasUserCredentialPath() bool { + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s EdgeConnectionConfig) UserCredentialPathBytes() ([]byte, error) { + p, err := s.Struct.Ptr(0) + return p.TextBytes(), err +} + +func (s EdgeConnectionConfig) SetUserCredentialPath(v string) error { + return s.Struct.SetText(0, v) +} + +// EdgeConnectionConfig_List is a list of EdgeConnectionConfig. +type EdgeConnectionConfig_List struct{ capnp.List } + +// NewEdgeConnectionConfig creates a new list of EdgeConnectionConfig. +func NewEdgeConnectionConfig_List(s *capnp.Segment, sz int32) (EdgeConnectionConfig_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 32, PointerCount: 1}, sz) + return EdgeConnectionConfig_List{l}, err +} + +func (s EdgeConnectionConfig_List) At(i int) EdgeConnectionConfig { + return EdgeConnectionConfig{s.List.Struct(i)} +} + +func (s EdgeConnectionConfig_List) Set(i int, v EdgeConnectionConfig) error { + return s.List.SetStruct(i, v.Struct) +} + +func (s EdgeConnectionConfig_List) String() string { + str, _ := text.MarshalList(0xc744e349009087aa, s.List) + return str +} + +// EdgeConnectionConfig_Promise is a wrapper for a EdgeConnectionConfig promised by a client call. 
+type EdgeConnectionConfig_Promise struct{ *capnp.Pipeline } + +func (p EdgeConnectionConfig_Promise) Struct() (EdgeConnectionConfig, error) { + s, err := p.Pipeline.Struct() + return EdgeConnectionConfig{s}, err +} + type ReverseProxyConfig struct{ capnp.Struct } type ReverseProxyConfig_origin ReverseProxyConfig type ReverseProxyConfig_origin_Which uint16 @@ -1023,6 +1250,9 @@ func (s ReverseProxyConfig_origin) Which() ReverseProxyConfig_origin_Which { return ReverseProxyConfig_origin_Which(s.Struct.Uint16(0)) } func (s ReverseProxyConfig_origin) Http() (HTTPOriginConfig, error) { + if s.Struct.Uint16(0) != 0 { + panic("Which() != http") + } p, err := s.Struct.Ptr(1) return HTTPOriginConfig{Struct: p.Struct()}, err } @@ -1053,6 +1283,9 @@ func (s ReverseProxyConfig_origin) NewHttp() (HTTPOriginConfig, error) { } func (s ReverseProxyConfig_origin) Websocket() (WebSocketOriginConfig, error) { + if s.Struct.Uint16(0) != 1 { + panic("Which() != websocket") + } p, err := s.Struct.Ptr(1) return WebSocketOriginConfig{Struct: p.Struct()}, err } @@ -1083,6 +1316,9 @@ func (s ReverseProxyConfig_origin) NewWebsocket() (WebSocketOriginConfig, error) } func (s ReverseProxyConfig_origin) HelloWorld() (HelloWorldOriginConfig, error) { + if s.Struct.Uint16(0) != 2 { + panic("Which() != helloWorld") + } p, err := s.Struct.Ptr(1) return HelloWorldOriginConfig{Struct: p.Struct()}, err } @@ -1153,6 +1389,11 @@ func (s ReverseProxyConfig_List) Set(i int, v ReverseProxyConfig) error { return s.List.SetStruct(i, v.Struct) } +func (s ReverseProxyConfig_List) String() string { + str, _ := text.MarshalList(0xc766a92976e389c4, s.List) + return str +} + // ReverseProxyConfig_Promise is a wrapper for a ReverseProxyConfig promised by a client call. type ReverseProxyConfig_Promise struct{ *capnp.Pipeline } @@ -1210,22 +1451,22 @@ func (s WebSocketOriginConfig) String() string { return str } -func (s WebSocketOriginConfig) Url() (string, error) { +func (s WebSocketOriginConfig) UrlString() (string, error) { p, err := s.Struct.Ptr(0) return p.Text(), err } -func (s WebSocketOriginConfig) HasUrl() bool { +func (s WebSocketOriginConfig) HasUrlString() bool { p, err := s.Struct.Ptr(0) return p.IsValid() || err != nil } -func (s WebSocketOriginConfig) UrlBytes() ([]byte, error) { +func (s WebSocketOriginConfig) UrlStringBytes() ([]byte, error) { p, err := s.Struct.Ptr(0) return p.TextBytes(), err } -func (s WebSocketOriginConfig) SetUrl(v string) error { +func (s WebSocketOriginConfig) SetUrlString(v string) error { return s.Struct.SetText(0, v) } @@ -1292,6 +1533,11 @@ func (s WebSocketOriginConfig_List) Set(i int, v WebSocketOriginConfig) error { return s.List.SetStruct(i, v.Struct) } +func (s WebSocketOriginConfig_List) String() string { + str, _ := text.MarshalList(0xf9c895683ed9ac4c, s.List) + return str +} + // WebSocketOriginConfig_Promise is a wrapper for a WebSocketOriginConfig promised by a client call. 
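Taken together, the generated SupervisorConfig and EdgeConnectionConfig accessors above replace the old flat ClientConfig fields. Below is a minimal illustrative sketch of how a caller might populate the regrouped config over a fresh message; the duration values and credential path are placeholders, and the message/segment boilerplate is standard zombiezen capnproto2 usage rather than part of this change.

package example

import (
	"time"

	"github.com/cloudflare/cloudflared/tunnelrpc"
	capnp "zombiezen.com/go/capnproto2"
)

// buildClientConfig sketches how the regrouped ClientConfig might be populated
// using only the setters that appear in this diff.
func buildClientConfig() (tunnelrpc.ClientConfig, error) {
	_, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
	if err != nil {
		return tunnelrpc.ClientConfig{}, err
	}
	cfg, err := tunnelrpc.NewRootClientConfig(seg)
	if err != nil {
		return tunnelrpc.ClientConfig{}, err
	}
	cfg.SetVersion(1)

	// Supervisor-level settings now live in their own nested struct.
	supervisor, err := cfg.NewSupervisorConfig()
	if err != nil {
		return tunnelrpc.ClientConfig{}, err
	}
	supervisor.SetAutoUpdateFrequency((24 * time.Hour).Nanoseconds())
	supervisor.SetMetricsUpdateFrequency((5 * time.Minute).Nanoseconds())
	supervisor.SetGracePeriod((30 * time.Second).Nanoseconds())

	// Connection-manager settings moved into EdgeConnectionConfig.
	edge, err := cfg.NewEdgeConnectionConfig()
	if err != nil {
		return tunnelrpc.ClientConfig{}, err
	}
	edge.SetNumHAConnections(4)
	edge.SetHeartbeatInterval((5 * time.Second).Nanoseconds())
	edge.SetTimeout((15 * time.Second).Nanoseconds())
	edge.SetMaxFailedHeartbeats(5)
	// Placeholder path; the field holds the credential file location.
	if err := edge.SetUserCredentialPath("/etc/cloudflared/cert.pem"); err != nil {
		return tunnelrpc.ClientConfig{}, err
	}
	return cfg, nil
}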
type WebSocketOriginConfig_Promise struct{ *capnp.Pipeline } @@ -1301,25 +1547,6 @@ func (p WebSocketOriginConfig_Promise) Struct() (WebSocketOriginConfig, error) { } type HTTPOriginConfig struct{ capnp.Struct } -type HTTPOriginConfig_originAddr HTTPOriginConfig -type HTTPOriginConfig_originAddr_Which uint16 - -const ( - HTTPOriginConfig_originAddr_Which_http HTTPOriginConfig_originAddr_Which = 0 - HTTPOriginConfig_originAddr_Which_unix HTTPOriginConfig_originAddr_Which = 1 -) - -func (w HTTPOriginConfig_originAddr_Which) String() string { - const s = "httpunix" - switch w { - case HTTPOriginConfig_originAddr_Which_http: - return s[0:4] - case HTTPOriginConfig_originAddr_Which_unix: - return s[4:8] - - } - return "HTTPOriginConfig_originAddr_Which(" + strconv.FormatUint(uint64(w), 10) + ")" -} // HTTPOriginConfig_TypeID is the unique identifier for the type HTTPOriginConfig. const HTTPOriginConfig_TypeID = 0xe4a6a1bc139211b4 @@ -1344,87 +1571,39 @@ func (s HTTPOriginConfig) String() string { return str } -func (s HTTPOriginConfig) OriginAddr() HTTPOriginConfig_originAddr { - return HTTPOriginConfig_originAddr(s) -} - -func (s HTTPOriginConfig_originAddr) Which() HTTPOriginConfig_originAddr_Which { - return HTTPOriginConfig_originAddr_Which(s.Struct.Uint16(0)) -} -func (s HTTPOriginConfig_originAddr) Http() (CapnpHTTPURL, error) { +func (s HTTPOriginConfig) UrlString() (string, error) { p, err := s.Struct.Ptr(0) - return CapnpHTTPURL{Struct: p.Struct()}, err + return p.Text(), err } -func (s HTTPOriginConfig_originAddr) HasHttp() bool { - if s.Struct.Uint16(0) != 0 { - return false - } +func (s HTTPOriginConfig) HasUrlString() bool { p, err := s.Struct.Ptr(0) return p.IsValid() || err != nil } -func (s HTTPOriginConfig_originAddr) SetHttp(v CapnpHTTPURL) error { - s.Struct.SetUint16(0, 0) - return s.Struct.SetPtr(0, v.Struct.ToPtr()) -} - -// NewHttp sets the http field to a newly -// allocated CapnpHTTPURL struct, preferring placement in s's segment. -func (s HTTPOriginConfig_originAddr) NewHttp() (CapnpHTTPURL, error) { - s.Struct.SetUint16(0, 0) - ss, err := NewCapnpHTTPURL(s.Struct.Segment()) - if err != nil { - return CapnpHTTPURL{}, err - } - err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) - return ss, err -} - -func (s HTTPOriginConfig_originAddr) Unix() (UnixPath, error) { +func (s HTTPOriginConfig) UrlStringBytes() ([]byte, error) { p, err := s.Struct.Ptr(0) - return UnixPath{Struct: p.Struct()}, err + return p.TextBytes(), err } -func (s HTTPOriginConfig_originAddr) HasUnix() bool { - if s.Struct.Uint16(0) != 1 { - return false - } - p, err := s.Struct.Ptr(0) - return p.IsValid() || err != nil -} - -func (s HTTPOriginConfig_originAddr) SetUnix(v UnixPath) error { - s.Struct.SetUint16(0, 1) - return s.Struct.SetPtr(0, v.Struct.ToPtr()) -} - -// NewUnix sets the unix field to a newly -// allocated UnixPath struct, preferring placement in s's segment. 
-func (s HTTPOriginConfig_originAddr) NewUnix() (UnixPath, error) { - s.Struct.SetUint16(0, 1) - ss, err := NewUnixPath(s.Struct.Segment()) - if err != nil { - return UnixPath{}, err - } - err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) - return ss, err +func (s HTTPOriginConfig) SetUrlString(v string) error { + return s.Struct.SetText(0, v) } func (s HTTPOriginConfig) TcpKeepAlive() int64 { - return int64(s.Struct.Uint64(8)) + return int64(s.Struct.Uint64(0)) } func (s HTTPOriginConfig) SetTcpKeepAlive(v int64) { - s.Struct.SetUint64(8, uint64(v)) + s.Struct.SetUint64(0, uint64(v)) } func (s HTTPOriginConfig) DialDualStack() bool { - return s.Struct.Bit(16) + return s.Struct.Bit(64) } func (s HTTPOriginConfig) SetDialDualStack(v bool) { - s.Struct.SetBit(16, v) + s.Struct.SetBit(64, v) } func (s HTTPOriginConfig) TlsHandshakeTimeout() int64 { @@ -1436,11 +1615,11 @@ func (s HTTPOriginConfig) SetTlsHandshakeTimeout(v int64) { } func (s HTTPOriginConfig) TlsVerify() bool { - return s.Struct.Bit(17) + return s.Struct.Bit(65) } func (s HTTPOriginConfig) SetTlsVerify(v bool) { - s.Struct.SetBit(17, v) + s.Struct.SetBit(65, v) } func (s HTTPOriginConfig) OriginCAPool() (string, error) { @@ -1514,11 +1693,11 @@ func (s HTTPOriginConfig) SetExpectContinueTimeout(v int64) { } func (s HTTPOriginConfig) ChunkedEncoding() bool { - return s.Struct.Bit(18) + return s.Struct.Bit(66) } func (s HTTPOriginConfig) SetChunkedEncoding(v bool) { - s.Struct.SetBit(18, v) + s.Struct.SetBit(66, v) } // HTTPOriginConfig_List is a list of HTTPOriginConfig. @@ -1536,6 +1715,11 @@ func (s HTTPOriginConfig_List) Set(i int, v HTTPOriginConfig) error { return s.List.SetStruct(i, v.Struct) } +func (s HTTPOriginConfig_List) String() string { + str, _ := text.MarshalList(0xe4a6a1bc139211b4, s.List) + return str +} + // HTTPOriginConfig_Promise is a wrapper for a HTTPOriginConfig promised by a client call. type HTTPOriginConfig_Promise struct{ *capnp.Pipeline } @@ -1544,156 +1728,6 @@ func (p HTTPOriginConfig_Promise) Struct() (HTTPOriginConfig, error) { return HTTPOriginConfig{s}, err } -func (p HTTPOriginConfig_Promise) OriginAddr() HTTPOriginConfig_originAddr_Promise { - return HTTPOriginConfig_originAddr_Promise{p.Pipeline} -} - -// HTTPOriginConfig_originAddr_Promise is a wrapper for a HTTPOriginConfig_originAddr promised by a client call. -type HTTPOriginConfig_originAddr_Promise struct{ *capnp.Pipeline } - -func (p HTTPOriginConfig_originAddr_Promise) Struct() (HTTPOriginConfig_originAddr, error) { - s, err := p.Pipeline.Struct() - return HTTPOriginConfig_originAddr{s}, err -} - -func (p HTTPOriginConfig_originAddr_Promise) Http() CapnpHTTPURL_Promise { - return CapnpHTTPURL_Promise{Pipeline: p.Pipeline.GetPipeline(0)} -} - -func (p HTTPOriginConfig_originAddr_Promise) Unix() UnixPath_Promise { - return UnixPath_Promise{Pipeline: p.Pipeline.GetPipeline(0)} -} - -type CapnpHTTPURL struct{ capnp.Struct } - -// CapnpHTTPURL_TypeID is the unique identifier for the type CapnpHTTPURL. 
-const CapnpHTTPURL_TypeID = 0xa160eb416f17c28e - -func NewCapnpHTTPURL(s *capnp.Segment) (CapnpHTTPURL, error) { - st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return CapnpHTTPURL{st}, err -} - -func NewRootCapnpHTTPURL(s *capnp.Segment) (CapnpHTTPURL, error) { - st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return CapnpHTTPURL{st}, err -} - -func ReadRootCapnpHTTPURL(msg *capnp.Message) (CapnpHTTPURL, error) { - root, err := msg.RootPtr() - return CapnpHTTPURL{root.Struct()}, err -} - -func (s CapnpHTTPURL) String() string { - str, _ := text.Marshal(0xa160eb416f17c28e, s.Struct) - return str -} - -func (s CapnpHTTPURL) Url() (string, error) { - p, err := s.Struct.Ptr(0) - return p.Text(), err -} - -func (s CapnpHTTPURL) HasUrl() bool { - p, err := s.Struct.Ptr(0) - return p.IsValid() || err != nil -} - -func (s CapnpHTTPURL) UrlBytes() ([]byte, error) { - p, err := s.Struct.Ptr(0) - return p.TextBytes(), err -} - -func (s CapnpHTTPURL) SetUrl(v string) error { - return s.Struct.SetText(0, v) -} - -// CapnpHTTPURL_List is a list of CapnpHTTPURL. -type CapnpHTTPURL_List struct{ capnp.List } - -// NewCapnpHTTPURL creates a new list of CapnpHTTPURL. -func NewCapnpHTTPURL_List(s *capnp.Segment, sz int32) (CapnpHTTPURL_List, error) { - l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return CapnpHTTPURL_List{l}, err -} - -func (s CapnpHTTPURL_List) At(i int) CapnpHTTPURL { return CapnpHTTPURL{s.List.Struct(i)} } - -func (s CapnpHTTPURL_List) Set(i int, v CapnpHTTPURL) error { return s.List.SetStruct(i, v.Struct) } - -// CapnpHTTPURL_Promise is a wrapper for a CapnpHTTPURL promised by a client call. -type CapnpHTTPURL_Promise struct{ *capnp.Pipeline } - -func (p CapnpHTTPURL_Promise) Struct() (CapnpHTTPURL, error) { - s, err := p.Pipeline.Struct() - return CapnpHTTPURL{s}, err -} - -type UnixPath struct{ capnp.Struct } - -// UnixPath_TypeID is the unique identifier for the type UnixPath. -const UnixPath_TypeID = 0xf7e406af6bd5236c - -func NewUnixPath(s *capnp.Segment) (UnixPath, error) { - st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return UnixPath{st}, err -} - -func NewRootUnixPath(s *capnp.Segment) (UnixPath, error) { - st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}) - return UnixPath{st}, err -} - -func ReadRootUnixPath(msg *capnp.Message) (UnixPath, error) { - root, err := msg.RootPtr() - return UnixPath{root.Struct()}, err -} - -func (s UnixPath) String() string { - str, _ := text.Marshal(0xf7e406af6bd5236c, s.Struct) - return str -} - -func (s UnixPath) Path() (string, error) { - p, err := s.Struct.Ptr(0) - return p.Text(), err -} - -func (s UnixPath) HasPath() bool { - p, err := s.Struct.Ptr(0) - return p.IsValid() || err != nil -} - -func (s UnixPath) PathBytes() ([]byte, error) { - p, err := s.Struct.Ptr(0) - return p.TextBytes(), err -} - -func (s UnixPath) SetPath(v string) error { - return s.Struct.SetText(0, v) -} - -// UnixPath_List is a list of UnixPath. -type UnixPath_List struct{ capnp.List } - -// NewUnixPath creates a new list of UnixPath. 
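With the originAddr union and its CapnpHTTPURL/UnixPath wrappers removed, HTTPOriginConfig is a flat struct whose urlString carries either an HTTP(S) URL or a unix: path, and the pogs layer round-trips it with a single Insert/Extract pair. An illustrative sketch follows; NewRootHTTPOriginConfig is assumed to be the standard generated root constructor for this struct, and the field values are placeholders.

package example

import (
	"time"

	"github.com/cloudflare/cloudflared/tunnelrpc"
	"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
	capnp "zombiezen.com/go/capnproto2"
)

// roundTripHTTPOrigin sketches the simplified HTTPOriginConfig marshalling:
// the whole struct, including URLString, now goes through one pogs call each way.
func roundTripHTTPOrigin() (*pogs.HTTPOriginConfig, error) {
	want := &pogs.HTTPOriginConfig{
		URLString:           "https://origin.example.com", // or "unix:/var/run/app.sock"
		TCPKeepAlive:        30 * time.Second,
		TLSHandshakeTimeout: 10 * time.Second,
		TLSVerify:           true,
	}

	_, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
	if err != nil {
		return nil, err
	}
	// Assumed standard generated constructor for the capnp struct.
	capnpCfg, err := tunnelrpc.NewRootHTTPOriginConfig(seg)
	if err != nil {
		return nil, err
	}
	if err := pogs.MarshalHTTPOriginConfig(capnpCfg, want); err != nil {
		return nil, err
	}
	return pogs.UnmarshalHTTPOriginConfig(capnpCfg)
}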
-func NewUnixPath_List(s *capnp.Segment, sz int32) (UnixPath_List, error) { - l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 1}, sz) - return UnixPath_List{l}, err -} - -func (s UnixPath_List) At(i int) UnixPath { return UnixPath{s.List.Struct(i)} } - -func (s UnixPath_List) Set(i int, v UnixPath) error { return s.List.SetStruct(i, v.Struct) } - -// UnixPath_Promise is a wrapper for a UnixPath promised by a client call. -type UnixPath_Promise struct{ *capnp.Pipeline } - -func (p UnixPath_Promise) Struct() (UnixPath, error) { - s, err := p.Pipeline.Struct() - return UnixPath{s}, err -} - type DoHProxyConfig struct{ capnp.Struct } // DoHProxyConfig_TypeID is the unique identifier for the type DoHProxyConfig. @@ -1784,6 +1818,11 @@ func (s DoHProxyConfig_List) At(i int) DoHProxyConfig { return DoHProxyConfig{s. func (s DoHProxyConfig_List) Set(i int, v DoHProxyConfig) error { return s.List.SetStruct(i, v.Struct) } +func (s DoHProxyConfig_List) String() string { + str, _ := text.MarshalList(0xb167b0bebe562cd0, s.List) + return str +} + // DoHProxyConfig_Promise is a wrapper for a DoHProxyConfig promised by a client call. type DoHProxyConfig_Promise struct{ *capnp.Pipeline } @@ -1834,6 +1873,11 @@ func (s HelloWorldOriginConfig_List) Set(i int, v HelloWorldOriginConfig) error return s.List.SetStruct(i, v.Struct) } +func (s HelloWorldOriginConfig_List) String() string { + str, _ := text.MarshalList(0x8891f360e47c30d3, s.List) + return str +} + // HelloWorldOriginConfig_Promise is a wrapper for a HelloWorldOriginConfig promised by a client call. type HelloWorldOriginConfig_Promise struct{ *capnp.Pipeline } @@ -1918,6 +1962,11 @@ func (s Tag_List) At(i int) Tag { return Tag{s.List.Struct(i)} } func (s Tag_List) Set(i int, v Tag) error { return s.List.SetStruct(i, v.Struct) } +func (s Tag_List) String() string { + str, _ := text.MarshalList(0xcbd96442ae3bb01a, s.List) + return str +} + // Tag_Promise is a wrapper for a Tag promised by a client call. type Tag_Promise struct{ *capnp.Pipeline } @@ -2043,6 +2092,11 @@ func (s ServerInfo_List) At(i int) ServerInfo { return ServerInfo{s.List.Struct( func (s ServerInfo_List) Set(i int, v ServerInfo) error { return s.List.SetStruct(i, v.Struct) } +func (s ServerInfo_List) String() string { + str, _ := text.MarshalList(0xf2c68e2547ec3866, s.List) + return str +} + // ServerInfo_Promise is a wrapper for a ServerInfo promised by a client call. type ServerInfo_Promise struct{ *capnp.Pipeline } @@ -2084,23 +2138,29 @@ func (s UseConfigurationResult) SetSuccess(v bool) { s.Struct.SetBit(0, v) } -func (s UseConfigurationResult) ErrorMessage() (string, error) { +func (s UseConfigurationResult) FailedConfigs() (FailedConfig_List, error) { p, err := s.Struct.Ptr(0) - return p.Text(), err + return FailedConfig_List{List: p.List()}, err } -func (s UseConfigurationResult) HasErrorMessage() bool { +func (s UseConfigurationResult) HasFailedConfigs() bool { p, err := s.Struct.Ptr(0) return p.IsValid() || err != nil } -func (s UseConfigurationResult) ErrorMessageBytes() ([]byte, error) { - p, err := s.Struct.Ptr(0) - return p.TextBytes(), err +func (s UseConfigurationResult) SetFailedConfigs(v FailedConfig_List) error { + return s.Struct.SetPtr(0, v.List.ToPtr()) } -func (s UseConfigurationResult) SetErrorMessage(v string) error { - return s.Struct.SetText(0, v) +// NewFailedConfigs sets the failedConfigs field to a newly +// allocated FailedConfig_List, preferring placement in s's segment. 
+func (s UseConfigurationResult) NewFailedConfigs(n int32) (FailedConfig_List, error) { + l, err := NewFailedConfig_List(s.Struct.Segment(), n) + if err != nil { + return FailedConfig_List{}, err + } + err = s.Struct.SetPtr(0, l.List.ToPtr()) + return l, err } // UseConfigurationResult_List is a list of UseConfigurationResult. @@ -2120,6 +2180,11 @@ func (s UseConfigurationResult_List) Set(i int, v UseConfigurationResult) error return s.List.SetStruct(i, v.Struct) } +func (s UseConfigurationResult_List) String() string { + str, _ := text.MarshalList(0xd58a254e7a792b87, s.List) + return str +} + // UseConfigurationResult_Promise is a wrapper for a UseConfigurationResult promised by a client call. type UseConfigurationResult_Promise struct{ *capnp.Pipeline } @@ -2128,6 +2193,266 @@ func (p UseConfigurationResult_Promise) Struct() (UseConfigurationResult, error) return UseConfigurationResult{s}, err } +type FailedConfig struct{ capnp.Struct } +type FailedConfig_config FailedConfig +type FailedConfig_config_Which uint16 + +const ( + FailedConfig_config_Which_supervisor FailedConfig_config_Which = 0 + FailedConfig_config_Which_edgeConnection FailedConfig_config_Which = 1 + FailedConfig_config_Which_doh FailedConfig_config_Which = 2 + FailedConfig_config_Which_reverseProxy FailedConfig_config_Which = 3 +) + +func (w FailedConfig_config_Which) String() string { + const s = "supervisoredgeConnectiondohreverseProxy" + switch w { + case FailedConfig_config_Which_supervisor: + return s[0:10] + case FailedConfig_config_Which_edgeConnection: + return s[10:24] + case FailedConfig_config_Which_doh: + return s[24:27] + case FailedConfig_config_Which_reverseProxy: + return s[27:39] + + } + return "FailedConfig_config_Which(" + strconv.FormatUint(uint64(w), 10) + ")" +} + +// FailedConfig_TypeID is the unique identifier for the type FailedConfig. +const FailedConfig_TypeID = 0xea20b390b257d1a5 + +func NewFailedConfig(s *capnp.Segment) (FailedConfig, error) { + st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}) + return FailedConfig{st}, err +} + +func NewRootFailedConfig(s *capnp.Segment) (FailedConfig, error) { + st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}) + return FailedConfig{st}, err +} + +func ReadRootFailedConfig(msg *capnp.Message) (FailedConfig, error) { + root, err := msg.RootPtr() + return FailedConfig{root.Struct()}, err +} + +func (s FailedConfig) String() string { + str, _ := text.Marshal(0xea20b390b257d1a5, s.Struct) + return str +} + +func (s FailedConfig) Config() FailedConfig_config { return FailedConfig_config(s) } + +func (s FailedConfig_config) Which() FailedConfig_config_Which { + return FailedConfig_config_Which(s.Struct.Uint16(0)) +} +func (s FailedConfig_config) Supervisor() (SupervisorConfig, error) { + if s.Struct.Uint16(0) != 0 { + panic("Which() != supervisor") + } + p, err := s.Struct.Ptr(0) + return SupervisorConfig{Struct: p.Struct()}, err +} + +func (s FailedConfig_config) HasSupervisor() bool { + if s.Struct.Uint16(0) != 0 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s FailedConfig_config) SetSupervisor(v SupervisorConfig) error { + s.Struct.SetUint16(0, 0) + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewSupervisor sets the supervisor field to a newly +// allocated SupervisorConfig struct, preferring placement in s's segment. 
+func (s FailedConfig_config) NewSupervisor() (SupervisorConfig, error) { + s.Struct.SetUint16(0, 0) + ss, err := NewSupervisorConfig(s.Struct.Segment()) + if err != nil { + return SupervisorConfig{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +func (s FailedConfig_config) EdgeConnection() (EdgeConnectionConfig, error) { + if s.Struct.Uint16(0) != 1 { + panic("Which() != edgeConnection") + } + p, err := s.Struct.Ptr(0) + return EdgeConnectionConfig{Struct: p.Struct()}, err +} + +func (s FailedConfig_config) HasEdgeConnection() bool { + if s.Struct.Uint16(0) != 1 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s FailedConfig_config) SetEdgeConnection(v EdgeConnectionConfig) error { + s.Struct.SetUint16(0, 1) + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewEdgeConnection sets the edgeConnection field to a newly +// allocated EdgeConnectionConfig struct, preferring placement in s's segment. +func (s FailedConfig_config) NewEdgeConnection() (EdgeConnectionConfig, error) { + s.Struct.SetUint16(0, 1) + ss, err := NewEdgeConnectionConfig(s.Struct.Segment()) + if err != nil { + return EdgeConnectionConfig{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +func (s FailedConfig_config) Doh() (DoHProxyConfig, error) { + if s.Struct.Uint16(0) != 2 { + panic("Which() != doh") + } + p, err := s.Struct.Ptr(0) + return DoHProxyConfig{Struct: p.Struct()}, err +} + +func (s FailedConfig_config) HasDoh() bool { + if s.Struct.Uint16(0) != 2 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s FailedConfig_config) SetDoh(v DoHProxyConfig) error { + s.Struct.SetUint16(0, 2) + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewDoh sets the doh field to a newly +// allocated DoHProxyConfig struct, preferring placement in s's segment. +func (s FailedConfig_config) NewDoh() (DoHProxyConfig, error) { + s.Struct.SetUint16(0, 2) + ss, err := NewDoHProxyConfig(s.Struct.Segment()) + if err != nil { + return DoHProxyConfig{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +func (s FailedConfig_config) ReverseProxy() (ReverseProxyConfig, error) { + if s.Struct.Uint16(0) != 3 { + panic("Which() != reverseProxy") + } + p, err := s.Struct.Ptr(0) + return ReverseProxyConfig{Struct: p.Struct()}, err +} + +func (s FailedConfig_config) HasReverseProxy() bool { + if s.Struct.Uint16(0) != 3 { + return false + } + p, err := s.Struct.Ptr(0) + return p.IsValid() || err != nil +} + +func (s FailedConfig_config) SetReverseProxy(v ReverseProxyConfig) error { + s.Struct.SetUint16(0, 3) + return s.Struct.SetPtr(0, v.Struct.ToPtr()) +} + +// NewReverseProxy sets the reverseProxy field to a newly +// allocated ReverseProxyConfig struct, preferring placement in s's segment. 
+func (s FailedConfig_config) NewReverseProxy() (ReverseProxyConfig, error) { + s.Struct.SetUint16(0, 3) + ss, err := NewReverseProxyConfig(s.Struct.Segment()) + if err != nil { + return ReverseProxyConfig{}, err + } + err = s.Struct.SetPtr(0, ss.Struct.ToPtr()) + return ss, err +} + +func (s FailedConfig) Reason() (string, error) { + p, err := s.Struct.Ptr(1) + return p.Text(), err +} + +func (s FailedConfig) HasReason() bool { + p, err := s.Struct.Ptr(1) + return p.IsValid() || err != nil +} + +func (s FailedConfig) ReasonBytes() ([]byte, error) { + p, err := s.Struct.Ptr(1) + return p.TextBytes(), err +} + +func (s FailedConfig) SetReason(v string) error { + return s.Struct.SetText(1, v) +} + +// FailedConfig_List is a list of FailedConfig. +type FailedConfig_List struct{ capnp.List } + +// NewFailedConfig creates a new list of FailedConfig. +func NewFailedConfig_List(s *capnp.Segment, sz int32) (FailedConfig_List, error) { + l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 8, PointerCount: 2}, sz) + return FailedConfig_List{l}, err +} + +func (s FailedConfig_List) At(i int) FailedConfig { return FailedConfig{s.List.Struct(i)} } + +func (s FailedConfig_List) Set(i int, v FailedConfig) error { return s.List.SetStruct(i, v.Struct) } + +func (s FailedConfig_List) String() string { + str, _ := text.MarshalList(0xea20b390b257d1a5, s.List) + return str +} + +// FailedConfig_Promise is a wrapper for a FailedConfig promised by a client call. +type FailedConfig_Promise struct{ *capnp.Pipeline } + +func (p FailedConfig_Promise) Struct() (FailedConfig, error) { + s, err := p.Pipeline.Struct() + return FailedConfig{s}, err +} + +func (p FailedConfig_Promise) Config() FailedConfig_config_Promise { + return FailedConfig_config_Promise{p.Pipeline} +} + +// FailedConfig_config_Promise is a wrapper for a FailedConfig_config promised by a client call. +type FailedConfig_config_Promise struct{ *capnp.Pipeline } + +func (p FailedConfig_config_Promise) Struct() (FailedConfig_config, error) { + s, err := p.Pipeline.Struct() + return FailedConfig_config{s}, err +} + +func (p FailedConfig_config_Promise) Supervisor() SupervisorConfig_Promise { + return SupervisorConfig_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +func (p FailedConfig_config_Promise) EdgeConnection() EdgeConnectionConfig_Promise { + return EdgeConnectionConfig_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +func (p FailedConfig_config_Promise) Doh() DoHProxyConfig_Promise { + return DoHProxyConfig_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + +func (p FailedConfig_config_Promise) ReverseProxy() ReverseProxyConfig_Promise { + return ReverseProxyConfig_Promise{Pipeline: p.Pipeline.GetPipeline(0)} +} + type TunnelServer struct{ Client capnp.Client } // TunnelServer_TypeID is the unique identifier for the type TunnelServer. @@ -2425,6 +2750,11 @@ func (s TunnelServer_registerTunnel_Params_List) Set(i int, v TunnelServer_regis return s.List.SetStruct(i, v.Struct) } +func (s TunnelServer_registerTunnel_Params_List) String() string { + str, _ := text.MarshalList(0xb70431c0dc014915, s.List) + return str +} + // TunnelServer_registerTunnel_Params_Promise is a wrapper for a TunnelServer_registerTunnel_Params promised by a client call. 
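The generated FailedConfig plumbing above is what MarshalUseConfigurationResult and UnmarshalUseConfigurationResult (earlier in this diff) drive: each failed component config is written into the failedConfigs list, and read back by switching on the union's Which() value. A short illustrative round trip follows; NewRootUseConfigurationResult is assumed to be the standard generated root constructor, and the zero-value DoHProxyConfig and reason text are placeholders.

package example

import (
	"github.com/cloudflare/cloudflared/tunnelrpc"
	"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
	capnp "zombiezen.com/go/capnproto2"
)

// roundTripResult sketches serializing per-config failures instead of the old
// single errorMessage text field.
func roundTripResult() (*pogs.UseConfigurationResult, error) {
	want := &pogs.UseConfigurationResult{
		Success: false,
		FailedConfigs: []*pogs.FailedConfig{
			{
				Config: &pogs.DoHProxyConfig{}, // zero-value config, purely illustrative
				Reason: "cannot listen on port 53",
			},
		},
	}

	_, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
	if err != nil {
		return nil, err
	}
	// Assumed standard generated root constructor for UseConfigurationResult.
	capnpResult, err := tunnelrpc.NewRootUseConfigurationResult(seg)
	if err != nil {
		return nil, err
	}
	if err := pogs.MarshalUseConfigurationResult(capnpResult, want); err != nil {
		return nil, err
	}
	return pogs.UnmarshalUseConfigurationResult(capnpResult)
}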
type TunnelServer_registerTunnel_Params_Promise struct{ *capnp.Pipeline } @@ -2504,6 +2834,11 @@ func (s TunnelServer_registerTunnel_Results_List) Set(i int, v TunnelServer_regi return s.List.SetStruct(i, v.Struct) } +func (s TunnelServer_registerTunnel_Results_List) String() string { + str, _ := text.MarshalList(0xf2c122394f447e8e, s.List) + return str +} + // TunnelServer_registerTunnel_Results_Promise is a wrapper for a TunnelServer_registerTunnel_Results promised by a client call. type TunnelServer_registerTunnel_Results_Promise struct{ *capnp.Pipeline } @@ -2558,6 +2893,11 @@ func (s TunnelServer_getServerInfo_Params_List) Set(i int, v TunnelServer_getSer return s.List.SetStruct(i, v.Struct) } +func (s TunnelServer_getServerInfo_Params_List) String() string { + str, _ := text.MarshalList(0xdc3ed6801961e502, s.List) + return str +} + // TunnelServer_getServerInfo_Params_Promise is a wrapper for a TunnelServer_getServerInfo_Params promised by a client call. type TunnelServer_getServerInfo_Params_Promise struct{ *capnp.Pipeline } @@ -2633,6 +2973,11 @@ func (s TunnelServer_getServerInfo_Results_List) Set(i int, v TunnelServer_getSe return s.List.SetStruct(i, v.Struct) } +func (s TunnelServer_getServerInfo_Results_List) String() string { + str, _ := text.MarshalList(0xe3e37d096a5b564e, s.List) + return str +} + // TunnelServer_getServerInfo_Results_Promise is a wrapper for a TunnelServer_getServerInfo_Results promised by a client call. type TunnelServer_getServerInfo_Results_Promise struct{ *capnp.Pipeline } @@ -2695,6 +3040,11 @@ func (s TunnelServer_unregisterTunnel_Params_List) Set(i int, v TunnelServer_unr return s.List.SetStruct(i, v.Struct) } +func (s TunnelServer_unregisterTunnel_Params_List) String() string { + str, _ := text.MarshalList(0x9b87b390babc2ccf, s.List) + return str +} + // TunnelServer_unregisterTunnel_Params_Promise is a wrapper for a TunnelServer_unregisterTunnel_Params promised by a client call. type TunnelServer_unregisterTunnel_Params_Promise struct{ *capnp.Pipeline } @@ -2745,6 +3095,11 @@ func (s TunnelServer_unregisterTunnel_Results_List) Set(i int, v TunnelServer_un return s.List.SetStruct(i, v.Struct) } +func (s TunnelServer_unregisterTunnel_Results_List) String() string { + str, _ := text.MarshalList(0xa29a916d4ebdd894, s.List) + return str +} + // TunnelServer_unregisterTunnel_Results_Promise is a wrapper for a TunnelServer_unregisterTunnel_Results promised by a client call. type TunnelServer_unregisterTunnel_Results_Promise struct{ *capnp.Pipeline } @@ -2820,6 +3175,11 @@ func (s TunnelServer_connect_Params_List) Set(i int, v TunnelServer_connect_Para return s.List.SetStruct(i, v.Struct) } +func (s TunnelServer_connect_Params_List) String() string { + str, _ := text.MarshalList(0xa766b24d4fe5da35, s.List) + return str +} + // TunnelServer_connect_Params_Promise is a wrapper for a TunnelServer_connect_Params promised by a client call. type TunnelServer_connect_Params_Promise struct{ *capnp.Pipeline } @@ -2899,6 +3259,11 @@ func (s TunnelServer_connect_Results_List) Set(i int, v TunnelServer_connect_Res return s.List.SetStruct(i, v.Struct) } +func (s TunnelServer_connect_Results_List) String() string { + str, _ := text.MarshalList(0xfeac5c8f4899ef7c, s.List) + return str +} + // TunnelServer_connect_Results_Promise is a wrapper for a TunnelServer_connect_Results promised by a client call. 
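The `String()` methods added throughout this hunk delegate to capnp's text marshaller and exist mainly as a debugging convenience. A hedged sketch using only the `NewFailedConfig_List` constructor shown above (message setup as in the previous sketch):

```go
package main

import (
	"fmt"
	"log"

	"github.com/cloudflare/cloudflared/tunnelrpc"
	capnp "zombiezen.com/go/capnproto2"
)

func main() {
	_, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
	if err != nil {
		log.Fatal(err)
	}
	// Two default-initialised FailedConfig elements; String() renders the
	// list in Cap'n Proto text form, which is handy in logs and tests.
	list, err := tunnelrpc.NewFailedConfig_List(seg, 2)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(list.String())
}
```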
type TunnelServer_connect_Results_Promise struct{ *capnp.Pipeline } @@ -3043,6 +3408,11 @@ func (s ClientService_useConfiguration_Params_List) Set(i int, v ClientService_u return s.List.SetStruct(i, v.Struct) } +func (s ClientService_useConfiguration_Params_List) String() string { + str, _ := text.MarshalList(0xb9d4ef45c2b5fc5b, s.List) + return str +} + // ClientService_useConfiguration_Params_Promise is a wrapper for a ClientService_useConfiguration_Params promised by a client call. type ClientService_useConfiguration_Params_Promise struct{ *capnp.Pipeline } @@ -3122,6 +3492,11 @@ func (s ClientService_useConfiguration_Results_List) Set(i int, v ClientService_ return s.List.SetStruct(i, v.Struct) } +func (s ClientService_useConfiguration_Results_List) String() string { + str, _ := text.MarshalList(0x91f7a001ca145b9d, s.List) + return str +} + // ClientService_useConfiguration_Results_Promise is a wrapper for a ClientService_useConfiguration_Results promised by a client call. type ClientService_useConfiguration_Results_Promise struct{ *capnp.Pipeline } @@ -3134,210 +3509,218 @@ func (p ClientService_useConfiguration_Results_Promise) Result() UseConfiguratio return UseConfigurationResult_Promise{Pipeline: p.Pipeline.GetPipeline(0)} } -const schema_db8274f9144abc7e = "x\xda\xacY}p\x1c\xe5y\x7f\x9e\xdd\xbb[\xc9\x96" + - "r\xb7\xdd\xf3\xc8\xba\x89G\xc55\x13\xecX\xd4\x8e\xe3" + - "\x16\xd46\xd2I\xb2\xa3s$\xfbV'9\xc1\x98\x19" + - "\xaf\xef^\xdd\xad\xbd\xb7{\xde\x0f#Q;6\x14\x1a" + - "P\x09_\xc13\x81\x92\xd6\xb8C\x09)4\x84\x86\xe9" + - "@!\x93\xa6\x1fI\x9a?\xd2\x99\x86Ni\xf3W\xc1" + - "\xd36-C!\xccd\xe8\x00\xdby\xde\xfd\xd4I\xc8" + - "\xb8S\xff\xe1\xb9y\xf6\xfdx\xbe~\xbf\xe7y\x1f\xed" + - "\xda\xd13&\xec\xce6\xfb\x00\xd4\x07\xb29\xff7\xdb" + - "?\xbe\xf4k\x17~t'\xc8%\xc1\xff\xe2K\x07\x8a" + - "\xef\xbaw\xfc\x0b\x00\xee\x19\xcc\xdd\x86\xcapN\x02P" + - "\xb6\xe7\x0e\x01\xfa\xff\xb0\xeb\xcc\xeb\xc7~\xfe\xe0\xdd " + - "\x970Y\x99\x91\x00\xf6\x94s\xcb\xa8\xdc\x94\x93@\xf4" + - "\xbfvs\xf1\xef\xf0\xe2/\x1e\x04\xf9:\x04\xc8\"}" + - "\xde\x9b\xdb \x00*3\xb9Q@\xff\xc7;_z\xf1" + - "\x81?\xfb\xd2\xef\x83\xfa\x09D\x08\xf6\xb7s\xff\x83\x80" + - "\xca\xed|\xc1}\xdf\x1b\xb0\xca\xffy\xec\xf1\x95\xf7\x04" + - "'=\x95\x1bA\xe5\x05\xae\xd4\xf3|\xf1\xc3\xff\xf4\xf2" + - "\xc1\xf6\x83\x8f^\x02\xf9\x13\xd1a\xaf\xe4\x04\x012\xfe" + - "\xde\x7f\xbe|h\xe6\xb9\x85'\x83/\xc1\xf6\x1f\xe6\x9e" + - "\xa3{^\xe5[\x7fpk\xe1\xde\xf2\xaf\xdf\xff$\xa8" + - "%L_\xc4\x0fy\x9f,\xda$\xd1OY\x1aB@" + - "\x7f\xf9\xc6\x97\x0f\xff\xfcw\x9do\x80:\x8c\x19\xff\xaf" + - "\xefy\xed\xf4\xf6\xa7\x16\xbe\x0f\xf3\x82\x84\"\xc0\x9eJ" + - "\xcf%:\xfa\xa6\x9eo\x02\xfa\xfd\x7f\xb1\xe3\xe0\xfd\xaf" + - "O\x7f\x8b\x8e\x16\xbamx\xb7g\x04\x95\xde^\xb2!" + - "\xdbK\xab\xff~\xe7\xe1\xef|\xe7\xd9\xe6\xb7\xba\x15\x11" + - "h\xf5\x13\xbd\x07Py\x81\xaf~\x9e\xaf\xdeT\xc1\x9f" + - "~ww\xe6\xcfC\xbbDZ\xa4o\xf8\x19]~v" + - "\x03-\xb8\xf9\xbd\xe7\xbf\xb7\xef\xcd\x9f\xbc\x90\x8e\xc0\x96" + - "\x8d\x02E`\xf7F2|\xcb\x1b\xe3\xfd\xe6\x9bw|" + - "\xb7\xcb\xc1\xfc\xa4\x9b6\x1e@\xa5\xbd\x91\xae\xd37\xd2" + - "i\xb1\xa1j\x093\xdd\xcae\xfbN\xa02\xd8G?" 
+ - "7\xf5q/\x1d\xb8\xf9+\x0fe/\x7f\xe5\xfb\xdd\x96" + - "sOn\xe9\xb7Q\xd9\xddO?\x87\xfb\x9f\x14\x00\xfd" + - "\xd2\xb3\xbf\xf1\xa7\xe3\x8dW\x7f\xd4\xa5\x0a\x1d\xae\x0c\x16" + - "\xdeV\xae-\xd0\xafk\x0a\xb7\x02\xfa_\xfa\xe4\xd2m" + - "\x07\xaf]~\xa5\xdbM\xdc\xc0\xb3\x85eT.\xf0\xd5" + - "\x0f\xf2\xd5\xc2em\xf0\xfc?~\xe6\xa7\xa9\xc4x\xab" + - "\xf0\xaf\x08\x19\xff\xe0\xe1\x9bO\xf4\x9e}\xed\xb5tb" + - "\\.p\x07\xbe[ \xff|[~Hy\xe9\xf1?" + - "~\x9d.\x92\xba\x1d4(\x1fAeX\xa6\x9f\xdbe" + - "n\xc3\xdecev\xf4\x86/\xfc\x0c\xe4\x92\xb8\x02A" + - "\xbd\xc5\x11T\x06\x8b\xdc;E\x09\x95\xbf\xa1\x9f\xfe}" + - "\xcd#?|k\xe2\xf1\xff\xee>\x9d;\xf4\x19\xda\xf2" + - "2\xdf\xf2B\xd1'\x87\xee\xd9\xfd{o\\\xf8\xa3\x89" + - "\xb7V\x9d.\x0f\x8c\xa3r\xcd\x00Y\xbce\xe0\xb3\xca" + - "\xcc\x00?\xfc\x8b\x93\x87n\xdc\xfa\x97o\xa7m\xdb;" + - "\xf06\xd9V\x19 \xdb\x16n\xf8\xaf\xcf^{\xdf\xdf" + - "\xbe\xbd\x16\xb8\xf4\x81\x1d\xa8,\xf1\x13=Z\xfc\xe6\xfe" + - "?\xf8I)_zg-|<2p\x02\x95g\x06" + - "8&\x07x\xe4\x8d_y\xe5\xe47s\xaf\xffb\xad" + - "\x93_\xdd\\B\xe5?6\xd3\xc9\x977\x93\x1a\xd3O" + - "\xbf\xfa\x99\xd6\x85\x1f\xbc\xdb\x1dK\xee\xe2\xec\xe0\x1d\xa8" + - "\x0c\x0e\xd2\xeaM\x83\xff\x06\xe8_s\xf8O~\xfb\xaf" + - "~\xe7\x0f\xdf\x03\xf5\x93(%\xe1\x99\x17%\x14\x08P" + - "\x83\x1c\xd5\xbd%\x8a\xfb\x997\x1f\x99\xba\xff\xe8\xd3\x1f" + - "\xa4=\xc0J/\xd2\x02\xafDW\x9f\xb8p\xc6\x9d\xfa" + - "\xea\x97\xfd5Rn\xcf\x85\xd28*O\x94\xe8\xe6\xc7" + - "K\xb7\xc2\xb0\xefz\xa6\xc9\x0c\xbb\x93\xa9\xffj\xf4\xb3" + - "~}]\xeb\x98\x9d\x91}\x8b\xba\xe3\xeafs\x8e\xcb" + - "G\xab\x96\xa1\xd7\x97\xaa\x88j\x1f)%o\x19\x01@" + - "\x947\x1d\x01@A\x96\xc7\x01F\xf5\xa6i\xd9\xcco" + - "\xe8N\xdd2M\x06b\xdd=w\\34\xb3\xce\xe2" + - "\x8b\xb2\xab/\x9ab\x86a}\xde\xb2\x8d\xc6![o" + - "\xea\xe6\x84e.\xe8M\x80*b\xbcMZ\xbdm\xc2" + - "\xd0\x99\xe9\xd6\x98}Z\xaf\xb3\xeb=\x87\x05\xfb<[" + - "su\xcb\xdc6\xcb\x1c\xcfp\x1d\x005#f\x002" + - "\x08 \xf7\x8f\x00\xa8=\"\xaaE\x01Gm\xbe\x00\x0b" + - "\x09\xee\x00\xb1\x00\xc9\x9d\xb9\xd5w\x06\xbe\xa0;\x99}" + - "\xbdg\xda\xac\xa9;.\xb3\x03\xf1\xb6\xd1\xaafkm" + - "'}\xe1\xa3\x00jAD\xf5\xe3\x02\xfaM[\xab\xb3" + - "*\xb3Q\xb7\x1a\x075\xd3\xaa\x89\xac\x8eY\x100\x9b" + - "\xbaT\\\xc3P\xfa\x7fjtn\xae:?;M\x01" + - "H\x9d\xbf51H\xf2l\x03\xfb@\xc0\xbe\xd4q\xfd" + - "WkC\xe4\xb6x\xd7\xfa\xfby\xa0\xeb\xee\xb6\xea\xd0" + - "*\xd3\x8f\x00\xa8}\"\xaa\x9b\x05\xf4;\xf4\x95\xb9\x0c" + - "D\xdb\xc1BR\x99\xba<\x9e\xfd\x10\xe3'\x82[\xaa" + - "\xe1)\xb6\xc3\x93C-\xc6\x97\x9d\xa5\xcb\xce\x88\xa8\xde" + - "-\xa0\x8cX\xa4\xc2+\xdfe\x03\xa8w\x8a\xa8> " + - " \x0aE\x9e\xb5_\xbeDm\x81\x88\xeac\x02\xca\xa2" + - "P\xa4\xd2&?\xb2\x03@}XD\xf5Y\x01\xe5\x8c" + - "X\xc4\x0c\x80\xfc\x0c\x85\xeeY\x11\xd5\x97\x04\xf4\xad " + - "1I\x7f\x17\xfbA\xc0~@\xbfnX^c\xc1\xd0" + - "`\xc8f\x8d\xcad,7\xbdv\xd5f\xa7u\xb4<" + - "\xa7\xec\xba\xac-u\\\x07s `\x0e0\xefjM" + - "\x07?\x06X\x15\x11\x0bI}\x00$a|&\xda\xac" + - "q\x98\xd9\x8e.Z\xe6\xaa\xa0\xae\xe1\xa6Yv\x9a\xd9" + - "\x0e\xab\xda\xd6\xe2R\x80\x83\xeb-[\x97\x9a\xba\xa9\xf6" + - "\x89\x99\x8f\xfb~\xe8\x93}d\xea\x98\x88\xea\xb4\x80[" + - "\xf0\x03\x12\x93[*\xb3\x00\xea\x94\x88\xea\x9c\x80[\x84" + - "\xf7IL\x8eQ\xc9\xadU\x11\xd5\xa3\x02\xe6[\xae\xdb" + - "\xc1B\xc2Na\xecne\xc7\x1d\xab~\x92\x01\x12\x96" + - "b\xde\x0b\xbf\xb6Bl\x83h4\xb0\x90\xb4X]\x81" + - "_+\xeby\xccG\xdd}\xb6m\xd9\x9cv\xe2h\xef" + - "\xfbTbD\x14\xec\xca\x91\xc4\x02Y\x18\x0b\xccR\x8f" + - "'\xfa\x0f\xd55\xcfa\xb1/m\xe6\xdaK\xe5\x05\x17" + - "Df\xc7 tZ\x96g4f\x19H\xae\xbd\x84\x08" + - "\x02bJ\xc958r\xd2\x9aJ\xb9<\xc8\xca\x94\x9e" + - 
"\xa4\xd3\xa4\x88j5\xd1s\x86d\xd3\"\xaa_ =" + - "C\xf7\xcf\x93\xfb\xe7DT;\x02\xfa\x06\xc1\xd1\x9c\xb2" + - "@t\xdcX\xdd@X\xb5x\x02J \xa0\x04\xe8{" + - "\x1d\xc7\xb5\x99\xd6\x06\x8c3\x8a\xd6\x7f\xec*8\xac\x0b" + - "\xfdU-\xcfa\xbc\xb6\x0d1\xb2f\x0e\xa4\x8d\x08\xa1" + - "5?\x9e8{m\xc0\xb4,\xc75\xb56\x03\x80\xc8" + - "\xb0sV\x87\xd8\x9aH!\xee\xac\xbar\xe3\xea\xa9?" + - "\xa0\xe1\x15\xc4\x7f)\xc5\xc3\xf5p7\xf2\xed\x13\x96)" + - "-\xe8M,$\x8dK\x97\x02k\xc4\xbd\xec\xb9-f" + - "\xbaz\x9d_\xb8*\xee[\x93\xfc\x8c}V\xf9T\xca" + - "\x91\x91\xcff\x8e'\x8e\x94N\xb2\xa5\xc8-C\xac\xad" + - "\xe9\x09\x9b\x87\xde,\x83\xf4\xb9d\xcdz\xfa\xad\xa4\x83" + - "<9\xa7\x8b0oK\x08\x93\xfe%\xdd\xbf|\xd7\x08" + - "\x08<3\x89\x05\xdb\x14\xd4\x96\x88\xaa\x1b\xf2e\x16@" + - ">\xf5\x10\x80\xea\x8a\xa8\x9e\x0f\xf92G'\x12_\x9e" + - "\x17Q\xbdW\x88T\x9b\xb2`4\x88x\xa4\xf3h`" + - "\xc99B\x9f\xce\x1c\xec\x05\x01{\x89\xf6\x02\x82\xd7\xd1" + - "2\xe7\xf46\xb3" + - "d\x96\x1b\x0d\x9b\xba\xc1>\xdf_\xd5\x8c\xf6\xe3\x07~" + - "\xd8\x10\xeeHu\xa3\xe1h'\xfe\xc3T\x90\xfcy\xcf" + - "\xd4\x17\xb1\x90\x0c\xbe\xaf<\xad[s68;\xca>" + - "\x12\xf4\x92\xc9\xf5\x95{\xbap\"\x186\xc0]\xfd\xef" + - "\xd6\xb5\xba\xef#I\x03\xcc3\xbc\x90\xfc\x11+\xbc\xce" + - "\x09\xdbP\x10\x17\xac\xd5\x1d\xe6\xff\x06\x00\x00\xff\xff\xf9" + - ":k\xec" +const schema_db8274f9144abc7e = "x\xda\xacY}\x8c\x1c\xe5y\x7f\x9ey\xf7v|\xe6" + + "\xce{\xe3\xb9\x80}\xd8\xba\xd6\x02%\x10Lq\\Z" + + "\xb8\xb6Y\xdf\x9d\xcf\xb9\xbd\xf8c\xe7\xf6\xce\x80\xb1%" + + "\x8fw\xdf\xdb\x1b{vf=\x1f\xf6\x9d\xe5\xc4`\xd9" + + "\x05\xae\x10l\x82%\xec\x90\x08\xdc\xba|\x08\x1aC@" + + "\x15\xd4\xa4\xa1jK\xda\xa8\"U\x93\xaai\xf3O\x03" + + "VU\xd4\x88\x9a\xa4B\xa9\x80\xa9\x9ew>oo9" + + "\xdbU\xf8\x03\x8f\x9e}\xde\xf7}>\x7f\xcf\xc7\xddv" + + "n\xc9\x06i]\xc7_v\x01h\x8fw\xe4\x83\xdfo" + + "\xbc}\xf6wN\xfd\xe0\x18(}R\xf0\xd5\x0bc\xbd" + + "\xbf\xf2\x8e\xfe\x1b\x00\xae\x1f\xc9\x1fB\xf5\x9e\xbc\x0c\xa0" + + "N\xe6\xb7\x01\x06\xfft\xdb\xe1ww\xff\xe2\xe4\x83\xa0" + + "\xf4a\xca\x99\x93\x01\xd67\xf2s\xa8\x1e\xcf\xcb\xc0\x82" + + "o\xde\xdb\xfb\xf7\xf8\xd4\x87'A\xf9\x1c\x02t \xfd" + + "\xac\xe7\x97J\x80\xeal\xbe\x08\x18\xbc}\xcb\x85\xd7O" + + "|\xe7\x81o\x80\xf6YD\x08\xcf\x9f\xce\xff/\x02\xaa" + + "/\x0a\x86K\x7f\xf2\xf9\xdc\x8bo/\xff\x96`\x08\xce" + + "\xfd\xe3]/\x9f\xf8\xceo\xbc\x07\x93\x92\x8c9\x80\xf5" + + "?\xce;\xc4\xfb\xef\xf9\xff\x00\x0c\x1e\xff\x977\xb66" + + "N\x9e9\x0b\xcag\xe3\xbb\xde\x94%\x09r\xc1\xed\xff" + + "zq\xdb\x96\x97\xa7\x9e\x09\x7f\x09\xe5xU~\x99\x8e" + + "\xfe\x8dL\xcf|\xff`\xcf\xc3\x83\xbf\xfb\xe83\xa0\xf5" + + "aF\x9f\x0eq\xc9\x7f\xcas\xa8\xe2\x12\xfa\xfcX\xee" + + "G\xc0`\xee\xce7\xb6\xff\xe2\x0f\xdd\xe7A[\x8b\xb9" + + "\xe0\xaf\x1fz\xe7\xc0M\xcfM\xbd%\xa4b\x00\xebo" + + "\xef\xfa" + + "\xee\xe6\x97\xe8\xea\x8cQC!.v\x0e\xa0\xfa?\x9d" + + "d\xd7K\x82\xfb\x87\xb7l\xff\xeew\xcf\xd7_j\x15" + + "D\"\xee\x93K\xc7P=\xb7\x94\xb8\x9f^J\xdc\x9f" + + ")\xe1O\xbf\xb7.\xf7\xe7\x91^\x8c\x98&\xafy\x8f" + + "\x1e7\xae!\x86{?z\xf5\xafF\xde\xff\xd1kY" + + "\x07tvI\xe4\x80\xd5]\xa4\xf8\xea\x9f\x0fu[\xef" + + "\x1f\xfd\xde|?\x867\x8dt\x8d\xa1zO\x97pz" + + "\xd7\xb7\x01?|\xfe\x81\x13\xa5w6\xbe\xa5\xf5a\xae" + + "U\x91K]\x87P\xed\xe8\xa6O\xec\x166J\xac\xd2" + + "\xc2.4Y\xb7l/\xaa#\xcb\xe8sp\x99`\x1f" + + "\xbb\xf7\xeb\x8fu\\\xfc\xfa[\xadf\x92\x89\xa7Tp" + + "P\xddU\xa0\xcf{\x0a\xcfH\x80A\xdf\xf9\xdf\xfb\xb3" + + "\xa1\xdaO~\xd0\"7]\xae\x8e,\xff@\xd5\x96\xd3" + + "\xd7\x96\xe5\x07\x01\x83\x07>?{h\xeb\x8ds?n" + + "\xb5\xa9\x10\xfc\xb9\xe5s\xa8\xbe)\xb8\xdf\x10\xdc\xd2E" + + 
"}\xe5}\xff\xfc\xc5\x9ff\xa2h\xad\xfa3\x84\\\xb0" + + "u\xfb\xbd{;\xbf\xf2\xce;\xd9(\xfaMUX\xfb" + + "v\x95\x8c\xf9\x8a\xf2\x98z\xe1\xe9?}\x97\x1e\x92[" + + "\xad9\xa9\xee@\xd5P\xe9\x93\xabB\x87$\x9a\xdb\xf9" + + "Z\xbfv\x00\xd5\xfd\xd7\x92\\\x8dkI\xae\xdbw\x0f" + + "\xf2\x9dw\xdc\xfd\x1e(}l^n\xbeH\x9co\x10" + + "\xe7\xfa\xd7\xae\x95Q5\xae\x93\x01\x82\xaf\xd5w\xfc\xdd" + + "\xa5\xe1\xa7\xff\xbbmDk\xd7\x0d\xa0\xaa\x13\xdf\xfa]" + + "\xd7\x09\xf3\xaf_\xf7G??\xf5\xc7\xc3\x97\x16\xdc\xfe" + + "\xdc\x8a!T_[Ar\xbc\xba\xe2K\xea\xc5\x15\xe2" + + "\xf2\xafn\xdcv\xe7\x9a7?\xc8Z\xe2\x1fV| " + + "Rq\x05Yb\xea\x8e\xff\xfa\xd2\x8d_\xfb\xdb\x0fZ" + + "\xdc#\x18q\xe5\xcd\xa8*+\xe9\xc6\xee\x95E\xc0\xf7" + + "7}\xebG}\x85\xbe_\xb6\x13t\xddJ\x8a\x93\x95" + + "\"NV\x0aA\xef\xfe\xd9\x99\x83\xc5o\xfc\xf2C\xd2" + + "\x8b\xb5 \xcf\xfe\xbe\x1d\xa8\x1e\xef\xa3\x9b\xef\xef\xa3\xf0" + + "\xdf\xfc\xc2O\xbe8}\xea\xfb\xbfj5\x82p\xc8\xda" + + "\xeb\x8f\xa2:x=q\xff\xc1\xf5\x84\x1f\x87\xdf?=" + + "\xfa\xe8\xce\x17>\xc9ju\xe3\xaa\xd7\x85\x7fW\x91V" + + "{O\x1d\xf6F\x9fx$h\x13t\xeb'W\x0d\xa1" + + "\xcaW\xd1m\xfa\xaa\x83\xb06\xf0|\xcb\xe2\xa6\xd3\xcc" + + "U\x7f+\xfe\xac\xdeZ\xd5\x9bVs`d\xc6p=" + + "\xc3\xaaO\x08z\xb1l\x9bFu\xb6\x8c\xa8u\xa1\x04" + + "\xa0\xac\x1e\x00@T>\xb3\x03\x00%E\x19\x02(\x1a" + + "u\xcbvxP3\xdc\xaamY\x1cX\xd5;\xb2G" + + "7u\xab\xca\x93\x87:\x16>4\xcaM\xd3\xbe\xcbv" + + "\xcc\xda6\xc7\xa8\x1b\xd6\xb0mM\x19u\x802br" + + "L^xl\xd84\xb8\xe5U\xb8s\xc0\xa8\xf2[}" + + "\x97\x87\xe7|G\xf7\x0c\xdb\xbaa\x9c\xbb\xbe\xe9\xb9\x00" + + "Z\x8e\xe5\x00r\x08\xa0t\x0f\x00hK\x18j\xbd\x12" + + "\x16\x1d\xc1\x80=i\xe6\x01b\x0f\xa4o\xe6\x17\xbe\x19" + + "\xda\x82\xde\xe4\xce\xad\xbe\xe5\xf0\xba\xe1z\xdc\x09\xc97" + + "\x14\xcb\xba\xa37\xdc\xec\x83g\x00\xb4\x1e\x86\xda*\x09" + + "\x83\xba\xa3Wy\x99;h\xd8\xb5\xad\xbaeW\x18\xaf" + + "b\x07H\xd8\x91y\xb4\x8d#6\xe9\x86\xc9k\xa1v" + + "\xb7V\xfb\xc5\xbfZ\x0f\xcbu\x05\x81xD\xdf\x01\xa0" + + "\xedf\xa8\x99\x12v\xe3'A/\x15)\xc58\x04\xa0" + + "M3\xd4<\x09\xbb\xa5\x8f\x83^\xe1\xb5\xfdk\x004" + + "\x93\xa16#a7\xfb(\xe8\xa5R\xa0\xf8{\x014" + + "\x8f\xa1v\x9f\x84\x81\xeb7\xc9\xa6.0\xdb\xc1\x9e4" + + "\x94#\xeb\xf0Z\x9d,mA\x91W\xc9\xd0\xd8\x13#" + + "n\xc8 \xd7\xeci\xecIKDt\xcc\xe1\x07\xb8\xe3" + + "\xf22\x14\x1c{f\x16{R\xe4m\xb1z\xf7\xd5Z" + + "=vtrj\xf1\xf3\"4\xab\xde\x0d\xe5\xfe\x05\xce" + + "\";v1\xd4VH\x184\xe9W\xeeq`\x8e\x8b" + + "=i\xe9m\x91\xb6M8\x0f\xd3\xff\x87\xc3W\xca\xd1" + + "-\x8e+\xc2Y\xebM\x1e\xfb\x0a=v\x98\xa1\xf6\xa0" + + "\x84\x0ab\xe8\xb3\xe3\x0e\x80v\x8c\xa1vBB\x94B" + + "\x8f=r\x16@;\xc1P{RB\x85I\xa1\xc3N" + + "\xdfL\xbd\x10C\xed\xbc\x84J\x8e\xf5R\x9b\xa1\xbcH" + + "\xc1v\x9e\xa1vA\xc2\xc0\x0eS\x89\xe4\xf7\xb0\x1b$" + + "\xec\x06\x0c\xaa\xa6\xed\xd7\xa6L\x1d\xfa\x1d^+mL" + + "\xe8\x96\xdf(;\xfc\x80\x81\xb6\xef\x0ez\x1eo\xc8M" + + "\xcf\xc5\xb5\x08\xd0\xc6\xef" + + "\x83\xbe7\xcd-\xcf\xa8\x8a\x07\x17\xf8}M\x1a\x9f\x89" + + "\xcdJ_\xc8\x182\xb6\xd9\x96=\xa9!\xe5}|6" + + "6K?o\xe8\x86\x99x?\xb2\xe6 \xc8_Ny" + + "\x16\xed6\xa2\xb2\x12\x16\x95bh\x9e\x16\xc8\x9c\x03\xd0" + + "\xeec\xa8=\x9c\x11\xf2\xa1\xc7\x00\xb4\x87\x19jOd" + + "\x84<5\x94\xc5L\x16a&Y\xf4I\x86\xda\xb3\x12" + + "b.\x84\xccs\x04\x99\xcf2\xd4^\x91\x04\x0a\x8e\x0e" + + "\x0e\xdb\x16FB\xb8\x001\x06\x06\xd3\\w\xbc=\\" + + "G\xafdy\xdc9\xa0\xa3\x19\xe7\xe0\x11\xcfhp\xdb" + + "\xf7\x92\x9cl\xe83\xa2dcm4<%\xeb\x9e\x8b" + + "\x9d a'\xa5\x80\xcb\x9da\x87\xd7\x90\xbc\xa1\x9be" + + "\x9dy\xd3Wb\xa0\xf9xYhc\x9eCiE\xa1" + + "\xff\xd2\xf9O9>\x00\x92H]\xd2\xb91\x946\x06" + + 
"\xa2\xa0tP_\xf0X\xda\x01\x88\x82\x92\xa7\x1b\xcf\xa4" + + "\x06\x8fD\x1b\xb5\xa1\x18\xa6D,s1t\xf5\x11\x82" + + "'\x83\xa7zFu\xd6@\xdb\x9a\x10\x06\xc2\xd4BU" + + "\xbb\xd1t\xb8\xeb\xa2a[\x9a\xaf\x9b\x06\xf3f\x93\x83" + + "\x8b\xda\x80r?\xcc\x99m\xcd~\xe1$2\xc2m\xb1" + + "\x11\xd4A\x1c\x03\xa8l@\x86\x95\xcd\x98\x86\x89Z\xc2" + + "!\x80\xcaF\xa2\x971\x8d\x14u\x0b\xf6\x01TF\x89" + + ">\x81\x12b\x18+\xaa\x86\xcf\x03T&\x88\xbc\x1b\xd3" + + "\x12\xab\xee\x12\xd7\xef$\xfa4\xd1;r\xc2|*\xc7" + + "\x9b\x01*\xbb\x89~\x98\xe8yIXP\x9d\xc5\xbd\x00" + + "\x95\x19\xa2\x1f#\xba\xdc\xd1\x8b\xa2\xf1G\x07\xa0r\x1f" + + "\xd1\x1f&\xfa\x92\x15\xbd\xb8\x04@}H\xd0\x1f$\xfa" + + "\xe3D\xef\\\xd9\x8b\x9d\x00\xeaI<\x0aP9A\xf4" + + "'\x89\xbe\x14{q)\x80z\x1a\xcf\x00T\x9e$\xfa" + + "\xb3D\xbf&\xdf\x8b\xd7\x00\xa8\xe7\x84" + + "k\x09!?\xc7P\xfbm\x09\x0b\xd9\xa4\xe8?\xa0\x9b" + + ">\xbf\x926h\xb2\xa5\x14\x84\xddl\x88\xcf\x99\xd7\x87" + + "\xd2\xd7\x93\xc7\xa9Y\xbc\x85\xa16*\xe1\x11\xd7\xafV" + + "I\xe9\xd8\x0aS\xd1\xcc\x00\xfdtw\xc6\x1f\xc98\x1f" + + "\xf9\xe3J\xcbn\x9d{\xe1W\xc9\x9a\xb2\xa9^\xc9z" + + "\xc3\xfd\x7f\x9e\x1e\xe7n\x81Z\xf6\xcbNf\xc9\x80~" + + "\xf9\xfa6:1QN\xc7G\x16\x82c\x16\x17\xc6\xb3" + + "\xb8\x90\xc2\xc2\xdel\xfa\xc7m\x98\xaa\x89<,\x13}" + + "'\xa6}\xb7z\x0f\x9e\x9d\x97\xff\xb9\xc1\x10\x17\xb8\xb8" + + "\xbeF\xf4\xa6\xc0\x05\x0cq\xa1!\xee7\x89>\x93\xc5" + + "\x05\x1f\xe7\xe6\xe3\x02\x8bq\x81\xf2\xf9\x18\xd1O\x08\\" + + "\xc8\x85\xb8\xf0\x08\xbe\x8cq\x87\xc0\xac\xb4\xd2\x18\xf4\x8bmY\x18\x96" + + "\xed\x09\xa3\x7f~=nF\xfdn\\\xd7'\x8a-\x05" + + "\x9b\xcf4y\xd5\x1b\xb6\xd1\xf2\x0c\xcb\xe7\x0b.\xa8N" + + "\xfb\xd6>^\x1bA\xabj\xd7\x0c\xab\x0e\x0b\x1am\xf6" + + "iS{\xa6\x91\x11\xd9\x8c\x99e\xb1r\x13\x95e\x8c" + + "\xca\xb22\x90N\x9f\xc5\xaa8Ut\xb8\xee\xb6\x99\xa6" + + "\xd8\xa7e[1L2z\xad\x87u\x00$\xabW\x8c" + + "wa\xca\xfeC )\x86\x8c\xe9\x0a\x11\xe3\x8d\xa1\xb2" + + "\xcb\x01I\x99\x94QJ6\xde\x18o\xab\x95\xd2\x1cH" + + "\xca\x88\x8c,YSc\xbc\x89R\xee\x1c\x02IY+" + + "\x07qk\x0e\xc5P\x9c\x0d\x18\xc4\x89\x0f\xfd\"\xf57" + + "`\x10\xcf\xef\x18\xb7\xf0\x00\x1b\xf0HT\x166`v" + + "\xe9\xc3>\xad\x8fn\xdf\x1e\x12F\xce0\xd4\x8e\xa5\x18" + + "y\xff\\:P'\xb3\xcb#\xcf\xb7\x9b\xa8\x8f\x02h" + + "O\x84\x9d`2Q\xbfD-\xe3+\x0c\xb5\x1fJi" + + "\xbd\x8c\xc3.\xde\x93\xa0\xed\xc4\xc3\xd4\"\xeb\x92(8" + + "\xa3\xce\xadui\x12\xd4\xeci\xd1\xd9ax\x95\x0b)" + + "bg7)\xcb2\x9b\x14\x8c\xc78y\x1e\xc0g\xf7" + + "*\xcb\x16\xc7\xccyC\x89\xa889\x115\xf1>\x1e" + + "\xe3\xbf\x8c(\x0ay\xbf[\x0e\xe2\xc1\x05\xe3rE\xce" + + "\xcb\xba\xec*\xa7\xb7q\xde\xef^I%\x88\xb7\xaf\x97" + + "\x1f\xc2\xc3w\x0a\x14l\xa1B\xc9\xbd{3\xdb\x1d\xd3" + + "\x8e\xe6\xa0\xc2\xd6L\xd5^\xccV\xa1\xc0q\x03Z\xa0" + + "\xc3-\xe1\xb7&\x0d\xbf\xa4A\xb8\x7fMf\xcb\x13O" + + "'\xc7\xc7\xa2\xa0|*i8\x95oR\xa0>\xc5P" + + "{!\x13~\xcf\x8d\xa5\xd3\x89\xcc\x1d'\x96S\xf6\x9d" + + "\x146M\xbb\xbe\xd9\xb0\xb8K-X\xcbH\xdd\xe4N" + + "C\xb7\xb8\x85\x1e\x81\x91\xef\x10\xa2\xceG\xae\xd2\xc6L" + + "\xe7\xb6\x98\xfa\x95(\xd8\xc3X\x8f\xcakf~<\x9b" + + "\xd9e\xc4\xcak\xafG;\x82\xdd\x19\xe5w\xd1\xfc\xb8" + + "\x93\xa16-a\xa0\xfb\x9e=\xd9\xac\xe9\xe8\xf1M\x0e" + + "\xdf\xefs\xd9\xaa\xce\xa6s\x14M\x14Uw\x12\x9b\xd4" + + "\xfbmrxq\xbf\xcf\xb3\x0c\xf1B\x15d\xc3\xae-" + + "\xd8\xa4\xb6i\xb6\xee\xe2{*vu\x1f\xf7\xe6-\x9a" + + "C\xb8\x8cU\xd1\xc7\xd3mj\xac\x891\x9e\x19\x99b" + + "\x18\xd9O\x01\xd5d\xa8\x1d\xce\xc0\xc8\xec\\\xea\xf0\xf6" + + "\xd5\xf5\xd7S\x10\x17Q\xb2\xed\xb6s\xbc\xc8\xaf(\xd1" + + "\xd2\xbf\x1e\\\xbe\xe5\x8a\xc6\xf5\xa8cmiX\xd7\xb4" + + "k\x97wD\x1d\xeb\x1dQ<\xf7\xa4\x7fw\x8c\x9es" + + 
"\xa3.\x11\xd8\x94\xbd\xb0\x01\xfc\xbf\x00\x00\x00\xff\xff\xde" + + "T\x04\xc0" func init() { schemas.Register(schema_db8274f9144abc7e, @@ -3345,7 +3728,7 @@ func init() { 0x8891f360e47c30d3, 0x91f7a001ca145b9d, 0x9b87b390babc2ccf, - 0xa160eb416f17c28e, + 0x9e12cfad042ba4f1, 0xa29a916d4ebdd894, 0xa766b24d4fe5da35, 0xa78f37418c1077c8, @@ -3355,6 +3738,7 @@ func init() { 0xb70431c0dc014915, 0xb9d4ef45c2b5fc5b, 0xc082ef6e0d42ed1d, + 0xc744e349009087aa, 0xc766a92976e389c4, 0xc793e50592935b4a, 0xcbd96442ae3bb01a, @@ -3362,15 +3746,15 @@ func init() { 0xdc3ed6801961e502, 0xe3e37d096a5b564e, 0xe4a6a1bc139211b4, + 0xea20b390b257d1a5, 0xea58385c65416035, 0xf0a143f1c95a678e, 0xf143a395ed8b3133, 0xf2c122394f447e8e, 0xf2c68e2547ec3866, 0xf41a0f001ad49e46, - 0xf7e406af6bd5236c, + 0xf7f49b3f779ae258, 0xf9c895683ed9ac4c, - 0xfc9f83c37bab5621, 0xfeac5c8f4899ef7c, 0xff8d9848747c956a) } diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE new file mode 100644 index 00000000..14127cd8 --- /dev/null +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/LICENSE @@ -0,0 +1,9 @@ +(The MIT License) + +Copyright (c) 2017 marvin + konsorten GmbH (open-source@konsorten.de) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md new file mode 100644 index 00000000..195333e5 --- /dev/null +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/README.md @@ -0,0 +1,41 @@ +# Windows Terminal Sequences + +This library allow for enabling Windows terminal color support for Go. + +See [Console Virtual Terminal Sequences](https://docs.microsoft.com/en-us/windows/console/console-virtual-terminal-sequences) for details. + +## Usage + +```go +import ( + "syscall" + + sequences "github.com/konsorten/go-windows-terminal-sequences" +) + +func main() { + sequences.EnableVirtualTerminalProcessing(syscall.Stdout, true) +} + +``` + +## Authors + +The tool is sponsored by the [marvin + konsorten GmbH](http://www.konsorten.de). 
+ +We thank all the authors who provided code to this library: + +* Felix Kollmann +* Nicolas Perraut + +## License + +(The MIT License) + +Copyright (c) 2018 marvin + konsorten GmbH (open-source@konsorten.de) + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the 'Software'), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod new file mode 100644 index 00000000..716c6131 --- /dev/null +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/go.mod @@ -0,0 +1 @@ +module github.com/konsorten/go-windows-terminal-sequences diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go new file mode 100644 index 00000000..ef18d8f9 --- /dev/null +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences.go @@ -0,0 +1,36 @@ +// +build windows + +package sequences + +import ( + "syscall" + "unsafe" +) + +var ( + kernel32Dll *syscall.LazyDLL = syscall.NewLazyDLL("Kernel32.dll") + setConsoleMode *syscall.LazyProc = kernel32Dll.NewProc("SetConsoleMode") +) + +func EnableVirtualTerminalProcessing(stream syscall.Handle, enable bool) error { + const ENABLE_VIRTUAL_TERMINAL_PROCESSING uint32 = 0x4 + + var mode uint32 + err := syscall.GetConsoleMode(syscall.Stdout, &mode) + if err != nil { + return err + } + + if enable { + mode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING + } else { + mode &^= ENABLE_VIRTUAL_TERMINAL_PROCESSING + } + + ret, _, err := setConsoleMode.Call(uintptr(unsafe.Pointer(stream)), uintptr(mode)) + if ret == 0 { + return err + } + + return nil +} diff --git a/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go new file mode 100644 index 00000000..df61a6f2 --- /dev/null +++ b/vendor/github.com/konsorten/go-windows-terminal-sequences/sequences_dummy.go @@ -0,0 +1,11 @@ +// +build linux darwin + +package sequences + +import ( + "fmt" +) + +func EnableVirtualTerminalProcessing(stream uintptr, enable bool) error { + return fmt.Errorf("windows only package") +} diff --git a/vendor/github.com/sirupsen/logrus/.gitignore b/vendor/github.com/sirupsen/logrus/.gitignore index 66be63a0..6b7d7d1e 100644 --- a/vendor/github.com/sirupsen/logrus/.gitignore +++ b/vendor/github.com/sirupsen/logrus/.gitignore @@ -1 +1,2 @@ logrus +vendor diff --git a/vendor/github.com/sirupsen/logrus/.travis.yml b/vendor/github.com/sirupsen/logrus/.travis.yml index 
a23296a5..848938a6 100644 --- a/vendor/github.com/sirupsen/logrus/.travis.yml +++ b/vendor/github.com/sirupsen/logrus/.travis.yml @@ -1,15 +1,25 @@ language: go -go: - - 1.6.x - - 1.7.x - - 1.8.x - - tip +go_import_path: github.com/sirupsen/logrus +git: + depth: 1 env: - - GOMAXPROCS=4 GORACE=halt_on_error=1 + - GO111MODULE=on + - GO111MODULE=off +go: [ 1.11.x, 1.12.x ] +os: [ linux, osx ] +matrix: + exclude: + - go: 1.12.x + env: GO111MODULE=off + - go: 1.11.x + os: osx install: - - go get github.com/stretchr/testify/assert - - go get gopkg.in/gemnasium/logrus-airbrake-hook.v2 - - go get golang.org/x/sys/unix - - go get golang.org/x/sys/windows + - ./travis/install.sh + - if [[ "$GO111MODULE" == "on" ]]; then go mod download; fi + - if [[ "$GO111MODULE" == "off" ]]; then go get github.com/stretchr/testify/assert golang.org/x/sys/unix github.com/konsorten/go-windows-terminal-sequences; fi script: + - ./travis/cross_build.sh + - export GOMAXPROCS=4 + - export GORACE=halt_on_error=1 - go test -race -v ./... + - if [[ "$TRAVIS_OS_NAME" == "linux" ]]; then go test -race -v -tags appengine ./... ; fi diff --git a/vendor/github.com/sirupsen/logrus/CHANGELOG.md b/vendor/github.com/sirupsen/logrus/CHANGELOG.md index 8236d8b6..51a7ab0c 100644 --- a/vendor/github.com/sirupsen/logrus/CHANGELOG.md +++ b/vendor/github.com/sirupsen/logrus/CHANGELOG.md @@ -1,3 +1,90 @@ +# 1.4.2 + * Fixes build break for plan9, nacl, solaris +# 1.4.1 +This new release introduces: + * Enhance TextFormatter to not print caller information when they are empty (#944) + * Remove dependency on golang.org/x/crypto (#932, #943) + +Fixes: + * Fix Entry.WithContext method to return a copy of the initial entry (#941) + +# 1.4.0 +This new release introduces: + * Add `DeferExitHandler`, similar to `RegisterExitHandler` but prepending the handler to the list of handlers (semantically like `defer`) (#848). + * Add `CallerPrettyfier` to `JSONFormatter` and `TextFormatter (#909, #911) + * Add `Entry.WithContext()` and `Entry.Context`, to set a context on entries to be used e.g. in hooks (#919). + +Fixes: + * Fix wrong method calls `Logger.Print` and `Logger.Warningln` (#893). + * Update `Entry.Logf` to not do string formatting unless the log level is enabled (#903) + * Fix infinite recursion on unknown `Level.String()` (#907) + * Fix race condition in `getCaller` (#916). + + +# 1.3.0 +This new release introduces: + * Log, Logf, Logln functions for Logger and Entry that take a Level + +Fixes: + * Building prometheus node_exporter on AIX (#840) + * Race condition in TextFormatter (#468) + * Travis CI import path (#868) + * Remove coloured output on Windows (#862) + * Pointer to func as field in JSONFormatter (#870) + * Properly marshal Levels (#873) + +# 1.2.0 +This new release introduces: + * A new method `SetReportCaller` in the `Logger` to enable the file, line and calling function from which the trace has been issued + * A new trace level named `Trace` whose level is below `Debug` + * A configurable exit function to be called upon a Fatal trace + * The `Level` object now implements `encoding.TextUnmarshaler` interface + +# 1.1.1 +This is a bug fix release. 
+ * fix the build break on Solaris + * don't drop a whole trace in JSONFormatter when a field param is a function pointer which can not be serialized + +# 1.1.0 +This new release introduces: + * several fixes: + * a fix for a race condition on entry formatting + * proper cleanup of previously used entries before putting them back in the pool + * the extra new line at the end of message in text formatter has been removed + * a new global public API to check if a level is activated: IsLevelEnabled + * the following methods have been added to the Logger object + * IsLevelEnabled + * SetFormatter + * SetOutput + * ReplaceHooks + * introduction of go module + * an indent configuration for the json formatter + * output colour support for windows + * the field sort function is now configurable for text formatter + * the CLICOLOR and CLICOLOR\_FORCE environment variable support in text formater + +# 1.0.6 + +This new release introduces: + * a new api WithTime which allows to easily force the time of the log entry + which is mostly useful for logger wrapper + * a fix reverting the immutability of the entry given as parameter to the hooks + a new configuration field of the json formatter in order to put all the fields + in a nested dictionnary + * a new SetOutput method in the Logger + * a new configuration of the textformatter to configure the name of the default keys + * a new configuration of the text formatter to disable the level truncation + +# 1.0.5 + +* Fix hooks race (#707) +* Fix panic deadlock (#695) + +# 1.0.4 + +* Fix race when adding hooks (#612) +* Fix terminal check in AppEngine (#635) + # 1.0.3 * Replace example files with testable examples diff --git a/vendor/github.com/sirupsen/logrus/README.md b/vendor/github.com/sirupsen/logrus/README.md index 4f5ce576..a4796eb0 100644 --- a/vendor/github.com/sirupsen/logrus/README.md +++ b/vendor/github.com/sirupsen/logrus/README.md @@ -56,8 +56,39 @@ time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4 time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009 time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true -exit status 1 ``` +To ensure this behaviour even if a TTY is attached, set your formatter as follows: + +```go + log.SetFormatter(&log.TextFormatter{ + DisableColors: true, + FullTimestamp: true, + }) +``` + +#### Logging Method Name + +If you wish to add the calling method as a field, instruct the logger via: +```go +log.SetReportCaller(true) +``` +This adds the caller as 'method' like so: + +```json +{"animal":"penguin","level":"fatal","method":"github.com/sirupsen/arcticcreatures.migrate","msg":"a penguin swims by", +"time":"2014-03-10 19:57:38.562543129 -0400 EDT"} +``` + +```text +time="2015-03-26T01:27:38-04:00" level=fatal method=github.com/sirupsen/arcticcreatures.migrate msg="a penguin swims by" animal=penguin +``` +Note that this does add measurable overhead - the cost will depend on the version of Go, but is +between 20 and 40% in recent tests with 1.6 and 1.7. You can validate this in your +environment via benchmarks: +``` +go test -bench=.*CallerTracing +``` + #### Case-sensitivity @@ -220,7 +251,7 @@ Logrus comes with [built-in hooks](hooks/). 
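The caller-reporting feature described in the README hunk above pairs with the `CallerPrettyfier` hook mentioned in the 1.4.0 changelog entry. A small usage sketch against the logrus 1.4 API (illustrative, not part of the vendored files):

```go
package main

import (
	"fmt"
	"path/filepath"
	"runtime"

	log "github.com/sirupsen/logrus"
)

func main() {
	// Include the calling method as a field, as described above.
	log.SetReportCaller(true)

	// CallerPrettyfier (added in 1.4.0) trims the reported function and file
	// names so the extra fields stay readable despite the added overhead.
	log.SetFormatter(&log.TextFormatter{
		FullTimestamp: true,
		CallerPrettyfier: func(f *runtime.Frame) (string, string) {
			return filepath.Base(f.Function), fmt.Sprintf("%s:%d", filepath.Base(f.File), f.Line)
		},
	})

	log.WithField("animal", "penguin").Info("a penguin swims by")
}
```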
Add those, or your custom hook, in ```go import ( log "github.com/sirupsen/logrus" - "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake" + "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "airbrake" logrus_syslog "github.com/sirupsen/logrus/hooks/syslog" "log/syslog" ) @@ -241,60 +272,15 @@ func init() { ``` Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md). -| Hook | Description | -| ----- | ----------- | -| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. | -| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. | -| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) | -| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) | -| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. | -| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic | -| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) | -| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch| -| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/) -| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd | -| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) | -| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) | -| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. 
| -| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger | -| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb | -| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) | -| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` | -| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka | -| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem | -| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) | -| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) | -| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) | -| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) | -| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) | -| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail | -| [Mattermost](https://github.com/shuLhan/mattermost-integration/tree/master/hooks/logrus) | Hook for logging to [Mattermost](https://mattermost.com/) | -| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb | -| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) | -| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit | -| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. | -| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) | -| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) | -| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) | -| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) | -| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar | -| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)| -| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. | -| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. | -| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) | -| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)| -| [Syslog](https://github.com/sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. | -| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. 
| -| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) | -| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) | -| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash | -| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) | +A list of currently known of service hook can be found in this wiki [page](https://github.com/sirupsen/logrus/wiki/Hooks) + #### Level logging -Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic. +Logrus has seven logging levels: Trace, Debug, Info, Warning, Error, Fatal and Panic. ```go +log.Trace("Something very low level.") log.Debug("Useful debugging information.") log.Info("Something noteworthy happened!") log.Warn("You should probably take a look at this.") @@ -366,16 +352,20 @@ The built-in logging formatters are: field to `true`. To force no colored output even if there is a TTY set the `DisableColors` field to `true`. For Windows, see [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable). + * When colors are enabled, levels are truncated to 4 characters by default. To disable + truncation set the `DisableLevelTruncation` field to `true`. * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter). * `logrus.JSONFormatter`. Logs fields as JSON. * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter). Third party logging formatters: -* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can by parsed by Kubernetes and Google Container Engine. +* [`FluentdFormatter`](https://github.com/joonix/log). Formats entries that can be parsed by Kubernetes and Google Container Engine. +* [`GELF`](https://github.com/fabienm/go-logrus-formatters). Formats entries so they comply to Graylog's [GELF 1.1 specification](http://docs.graylog.org/en/2.4/pages/gelf.html). * [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events. * [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout. * [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦. +* [`nested-logrus-formatter`](https://github.com/antonfisher/nested-logrus-formatter). Converts logrus fields to a nested structure. You can define your formatter by implementing the `Formatter` interface, requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a @@ -489,7 +479,7 @@ logrus.RegisterExitHandler(handler) #### Thread safety -By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs. +By default, Logger is protected by a mutex for concurrent writes. The mutex is held when calling hooks and writing logs. If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking. 
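The README hunk above notes that a custom formatter only needs to implement the `Formatter` interface with a single `Format(*Entry)` method. A minimal sketch of such a formatter, deliberately simpler than the built-in TextFormatter (field order follows Go map iteration and is not stable):

```go
package main

import (
	"bytes"
	"fmt"

	log "github.com/sirupsen/logrus"
)

// PlainFormatter renders entries as "LEVEL message key=value ...".
type PlainFormatter struct{}

func (f *PlainFormatter) Format(entry *log.Entry) ([]byte, error) {
	// entry.Buffer is set when Format is called from entry.log(); fall back
	// to a fresh buffer so the formatter also works in isolation.
	b := entry.Buffer
	if b == nil {
		b = &bytes.Buffer{}
	}
	fmt.Fprintf(b, "%s %s", entry.Level.String(), entry.Message)
	for k, v := range entry.Data {
		fmt.Fprintf(b, " %s=%v", k, v)
	}
	b.WriteByte('\n')
	return b.Bytes(), nil
}

func main() {
	log.SetFormatter(&PlainFormatter{})
	log.WithField("animal", "walrus").Info("a walrus appears")
}
```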
Situation when locking is not needed includes: diff --git a/vendor/github.com/sirupsen/logrus/alt_exit.go b/vendor/github.com/sirupsen/logrus/alt_exit.go index 8af90637..8fd189e1 100644 --- a/vendor/github.com/sirupsen/logrus/alt_exit.go +++ b/vendor/github.com/sirupsen/logrus/alt_exit.go @@ -51,9 +51,9 @@ func Exit(code int) { os.Exit(code) } -// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke -// all handlers. The handlers will also be invoked when any Fatal log entry is -// made. +// RegisterExitHandler appends a Logrus Exit handler to the list of handlers, +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. // // This method is useful when a caller wishes to use logrus to log a fatal // message but also needs to gracefully shutdown. An example usecase could be @@ -62,3 +62,15 @@ func Exit(code int) { func RegisterExitHandler(handler func()) { handlers = append(handlers, handler) } + +// DeferExitHandler prepends a Logrus Exit handler to the list of handlers, +// call logrus.Exit to invoke all handlers. The handlers will also be invoked when +// any Fatal log entry is made. +// +// This method is useful when a caller wishes to use logrus to log a fatal +// message but also needs to gracefully shutdown. An example usecase could be +// closing database connections, or sending a alert that the application is +// closing. +func DeferExitHandler(handler func()) { + handlers = append([]func(){handler}, handlers...) +} diff --git a/vendor/github.com/sirupsen/logrus/entry.go b/vendor/github.com/sirupsen/logrus/entry.go index 5bf582ef..63e25583 100644 --- a/vendor/github.com/sirupsen/logrus/entry.go +++ b/vendor/github.com/sirupsen/logrus/entry.go @@ -2,13 +2,33 @@ package logrus import ( "bytes" + "context" "fmt" "os" + "reflect" + "runtime" + "strings" "sync" "time" ) -var bufferPool *sync.Pool +var ( + bufferPool *sync.Pool + + // qualified package name, cached at first use + logrusPackage string + + // Positions in the call stack when tracing to report the calling method + minimumCallerDepth int + + // Used for caller information initialisation + callerInitOnce sync.Once +) + +const ( + maximumCallerDepth int = 25 + knownLogrusFrames int = 4 +) func init() { bufferPool = &sync.Pool{ @@ -16,15 +36,18 @@ func init() { return new(bytes.Buffer) }, } + + // start at the bottom of the stack before the package-name cache is primed + minimumCallerDepth = 1 } // Defines the key when adding errors using WithError. var ErrorKey = "error" // An entry is the final or intermediate Logrus logging entry. It contains all -// the fields passed with WithField{,s}. It's finally logged when Debug, Info, -// Warn, Error, Fatal or Panic is called on it. These objects can be reused and -// passed around as much as you wish to avoid field duplication. +// the fields passed with WithField{,s}. It's finally logged when Trace, Debug, +// Info, Warn, Error, Fatal or Panic is called on it. These objects can be +// reused and passed around as much as you wish to avoid field duplication. type Entry struct { Logger *Logger @@ -34,22 +57,31 @@ type Entry struct { // Time at which the log entry was created Time time.Time - // Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic + // Level the log entry was logged at: Trace, Debug, Info, Warn, Error, Fatal or Panic // This field will be set on entry firing and the value will be equal to the one in Logger struct field. 
Level Level - // Message passed to Debug, Info, Warn, Error, Fatal or Panic + // Calling method, with package name + Caller *runtime.Frame + + // Message passed to Trace, Debug, Info, Warn, Error, Fatal or Panic Message string - // When formatter is called in entry.log(), an Buffer may be set to entry + // When formatter is called in entry.log(), a Buffer may be set to entry Buffer *bytes.Buffer + + // Contains the context set by the user. Useful for hook processing etc. + Context context.Context + + // err may contain a field formatting error + err string } func NewEntry(logger *Logger) *Entry { return &Entry{ Logger: logger, - // Default is three fields, give a little extra room - Data: make(Fields, 5), + // Default is three fields, plus one optional. Give a little extra room. + Data: make(Fields, 6), } } @@ -69,6 +101,11 @@ func (entry *Entry) WithError(err error) *Entry { return entry.WithField(ErrorKey, err) } +// Add a context to the Entry. +func (entry *Entry) WithContext(ctx context.Context) *Entry { + return &Entry{Logger: entry.Logger, Data: entry.Data, Time: entry.Time, err: entry.err, Context: ctx} +} + // Add a single field to the Entry. func (entry *Entry) WithField(key string, value interface{}) *Entry { return entry.WithFields(Fields{key: value}) @@ -80,43 +117,120 @@ func (entry *Entry) WithFields(fields Fields) *Entry { for k, v := range entry.Data { data[k] = v } + fieldErr := entry.err for k, v := range fields { - data[k] = v + isErrField := false + if t := reflect.TypeOf(v); t != nil { + switch t.Kind() { + case reflect.Func: + isErrField = true + case reflect.Ptr: + isErrField = t.Elem().Kind() == reflect.Func + } + } + if isErrField { + tmp := fmt.Sprintf("can not add field %q", k) + if fieldErr != "" { + fieldErr = entry.err + ", " + tmp + } else { + fieldErr = tmp + } + } else { + data[k] = v + } } - return &Entry{Logger: entry.Logger, Data: data} + return &Entry{Logger: entry.Logger, Data: data, Time: entry.Time, err: fieldErr, Context: entry.Context} +} + +// Overrides the time of the Entry. +func (entry *Entry) WithTime(t time.Time) *Entry { + return &Entry{Logger: entry.Logger, Data: entry.Data, Time: t, err: entry.err, Context: entry.Context} +} + +// getPackageName reduces a fully qualified function name to the package name +// There really ought to be to be a better way... 
+func getPackageName(f string) string { + for { + lastPeriod := strings.LastIndex(f, ".") + lastSlash := strings.LastIndex(f, "/") + if lastPeriod > lastSlash { + f = f[:lastPeriod] + } else { + break + } + } + + return f +} + +// getCaller retrieves the name of the first non-logrus calling function +func getCaller() *runtime.Frame { + + // cache this package's fully-qualified name + callerInitOnce.Do(func() { + pcs := make([]uintptr, 2) + _ = runtime.Callers(0, pcs) + logrusPackage = getPackageName(runtime.FuncForPC(pcs[1]).Name()) + + // now that we have the cache, we can skip a minimum count of known-logrus functions + // XXX this is dubious, the number of frames may vary + minimumCallerDepth = knownLogrusFrames + }) + + // Restrict the lookback frames to avoid runaway lookups + pcs := make([]uintptr, maximumCallerDepth) + depth := runtime.Callers(minimumCallerDepth, pcs) + frames := runtime.CallersFrames(pcs[:depth]) + + for f, again := frames.Next(); again; f, again = frames.Next() { + pkg := getPackageName(f.Function) + + // If the caller isn't part of this package, we're done + if pkg != logrusPackage { + return &f + } + } + + // if we got here, we failed to find the caller's context + return nil +} + +func (entry Entry) HasCaller() (has bool) { + return entry.Logger != nil && + entry.Logger.ReportCaller && + entry.Caller != nil } // This function is not declared with a pointer value because otherwise // race conditions will occur when using multiple goroutines func (entry Entry) log(level Level, msg string) { var buffer *bytes.Buffer - entry.Time = time.Now() + + // Default to now, but allow users to override if they want. + // + // We don't have to worry about polluting future calls to Entry#log() + // with this assignment because this function is declared with a + // non-pointer receiver. 
+ if entry.Time.IsZero() { + entry.Time = time.Now() + } + entry.Level = level entry.Message = msg - - if err := entry.Logger.Hooks.Fire(level, &entry); err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) - entry.Logger.mu.Unlock() + if entry.Logger.ReportCaller { + entry.Caller = getCaller() } + + entry.fireHooks() + buffer = bufferPool.Get().(*bytes.Buffer) buffer.Reset() defer bufferPool.Put(buffer) entry.Buffer = buffer - serialized, err := entry.Logger.Formatter.Format(&entry) + + entry.write() + entry.Buffer = nil - if err != nil { - entry.Logger.mu.Lock() - fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) - entry.Logger.mu.Unlock() - } else { - entry.Logger.mu.Lock() - _, err = entry.Logger.Out.Write(serialized) - if err != nil { - fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) - } - entry.Logger.mu.Unlock() - } // To avoid Entry#log() returning a value that only would make sense for // panic() to use in Entry#Panic(), we avoid the allocation by checking @@ -126,26 +240,53 @@ func (entry Entry) log(level Level, msg string) { } } -func (entry *Entry) Debug(args ...interface{}) { - if entry.Logger.level() >= DebugLevel { - entry.log(DebugLevel, fmt.Sprint(args...)) +func (entry *Entry) fireHooks() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + err := entry.Logger.Hooks.Fire(entry.Level, entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err) } } +func (entry *Entry) write() { + entry.Logger.mu.Lock() + defer entry.Logger.mu.Unlock() + serialized, err := entry.Logger.Formatter.Format(entry) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err) + } else { + _, err = entry.Logger.Out.Write(serialized) + if err != nil { + fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err) + } + } +} + +func (entry *Entry) Log(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.log(level, fmt.Sprint(args...)) + } +} + +func (entry *Entry) Trace(args ...interface{}) { + entry.Log(TraceLevel, args...) +} + +func (entry *Entry) Debug(args ...interface{}) { + entry.Log(DebugLevel, args...) +} + func (entry *Entry) Print(args ...interface{}) { entry.Info(args...) } func (entry *Entry) Info(args ...interface{}) { - if entry.Logger.level() >= InfoLevel { - entry.log(InfoLevel, fmt.Sprint(args...)) - } + entry.Log(InfoLevel, args...) } func (entry *Entry) Warn(args ...interface{}) { - if entry.Logger.level() >= WarnLevel { - entry.log(WarnLevel, fmt.Sprint(args...)) - } + entry.Log(WarnLevel, args...) } func (entry *Entry) Warning(args ...interface{}) { @@ -153,37 +294,37 @@ func (entry *Entry) Warning(args ...interface{}) { } func (entry *Entry) Error(args ...interface{}) { - if entry.Logger.level() >= ErrorLevel { - entry.log(ErrorLevel, fmt.Sprint(args...)) - } + entry.Log(ErrorLevel, args...) } func (entry *Entry) Fatal(args ...interface{}) { - if entry.Logger.level() >= FatalLevel { - entry.log(FatalLevel, fmt.Sprint(args...)) - } - Exit(1) + entry.Log(FatalLevel, args...) + entry.Logger.Exit(1) } func (entry *Entry) Panic(args ...interface{}) { - if entry.Logger.level() >= PanicLevel { - entry.log(PanicLevel, fmt.Sprint(args...)) - } + entry.Log(PanicLevel, args...) 
panic(fmt.Sprint(args...)) } // Entry Printf family functions -func (entry *Entry) Debugf(format string, args ...interface{}) { - if entry.Logger.level() >= DebugLevel { - entry.Debug(fmt.Sprintf(format, args...)) +func (entry *Entry) Logf(level Level, format string, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, fmt.Sprintf(format, args...)) } } +func (entry *Entry) Tracef(format string, args ...interface{}) { + entry.Logf(TraceLevel, format, args...) +} + +func (entry *Entry) Debugf(format string, args ...interface{}) { + entry.Logf(DebugLevel, format, args...) +} + func (entry *Entry) Infof(format string, args ...interface{}) { - if entry.Logger.level() >= InfoLevel { - entry.Info(fmt.Sprintf(format, args...)) - } + entry.Logf(InfoLevel, format, args...) } func (entry *Entry) Printf(format string, args ...interface{}) { @@ -191,9 +332,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) { } func (entry *Entry) Warnf(format string, args ...interface{}) { - if entry.Logger.level() >= WarnLevel { - entry.Warn(fmt.Sprintf(format, args...)) - } + entry.Logf(WarnLevel, format, args...) } func (entry *Entry) Warningf(format string, args ...interface{}) { @@ -201,36 +340,36 @@ func (entry *Entry) Warningf(format string, args ...interface{}) { } func (entry *Entry) Errorf(format string, args ...interface{}) { - if entry.Logger.level() >= ErrorLevel { - entry.Error(fmt.Sprintf(format, args...)) - } + entry.Logf(ErrorLevel, format, args...) } func (entry *Entry) Fatalf(format string, args ...interface{}) { - if entry.Logger.level() >= FatalLevel { - entry.Fatal(fmt.Sprintf(format, args...)) - } - Exit(1) + entry.Logf(FatalLevel, format, args...) + entry.Logger.Exit(1) } func (entry *Entry) Panicf(format string, args ...interface{}) { - if entry.Logger.level() >= PanicLevel { - entry.Panic(fmt.Sprintf(format, args...)) - } + entry.Logf(PanicLevel, format, args...) } // Entry Println family functions -func (entry *Entry) Debugln(args ...interface{}) { - if entry.Logger.level() >= DebugLevel { - entry.Debug(entry.sprintlnn(args...)) +func (entry *Entry) Logln(level Level, args ...interface{}) { + if entry.Logger.IsLevelEnabled(level) { + entry.Log(level, entry.sprintlnn(args...)) } } +func (entry *Entry) Traceln(args ...interface{}) { + entry.Logln(TraceLevel, args...) +} + +func (entry *Entry) Debugln(args ...interface{}) { + entry.Logln(DebugLevel, args...) +} + func (entry *Entry) Infoln(args ...interface{}) { - if entry.Logger.level() >= InfoLevel { - entry.Info(entry.sprintlnn(args...)) - } + entry.Logln(InfoLevel, args...) } func (entry *Entry) Println(args ...interface{}) { @@ -238,9 +377,7 @@ func (entry *Entry) Println(args ...interface{}) { } func (entry *Entry) Warnln(args ...interface{}) { - if entry.Logger.level() >= WarnLevel { - entry.Warn(entry.sprintlnn(args...)) - } + entry.Logln(WarnLevel, args...) } func (entry *Entry) Warningln(args ...interface{}) { @@ -248,22 +385,16 @@ func (entry *Entry) Warningln(args ...interface{}) { } func (entry *Entry) Errorln(args ...interface{}) { - if entry.Logger.level() >= ErrorLevel { - entry.Error(entry.sprintlnn(args...)) - } + entry.Logln(ErrorLevel, args...) } func (entry *Entry) Fatalln(args ...interface{}) { - if entry.Logger.level() >= FatalLevel { - entry.Fatal(entry.sprintlnn(args...)) - } - Exit(1) + entry.Logln(FatalLevel, args...) 
+ entry.Logger.Exit(1) } func (entry *Entry) Panicln(args ...interface{}) { - if entry.Logger.level() >= PanicLevel { - entry.Panic(entry.sprintlnn(args...)) - } + entry.Logln(PanicLevel, args...) } // Sprintlnn => Sprint no newline. This is to get the behavior of how diff --git a/vendor/github.com/sirupsen/logrus/exported.go b/vendor/github.com/sirupsen/logrus/exported.go index 013183ed..62fc2f21 100644 --- a/vendor/github.com/sirupsen/logrus/exported.go +++ b/vendor/github.com/sirupsen/logrus/exported.go @@ -1,7 +1,9 @@ package logrus import ( + "context" "io" + "time" ) var ( @@ -15,37 +17,38 @@ func StandardLogger() *Logger { // SetOutput sets the standard logger output. func SetOutput(out io.Writer) { - std.mu.Lock() - defer std.mu.Unlock() - std.Out = out + std.SetOutput(out) } // SetFormatter sets the standard logger formatter. func SetFormatter(formatter Formatter) { - std.mu.Lock() - defer std.mu.Unlock() - std.Formatter = formatter + std.SetFormatter(formatter) +} + +// SetReportCaller sets whether the standard logger will include the calling +// method as a field. +func SetReportCaller(include bool) { + std.SetReportCaller(include) } // SetLevel sets the standard logger level. func SetLevel(level Level) { - std.mu.Lock() - defer std.mu.Unlock() std.SetLevel(level) } // GetLevel returns the standard logger level. func GetLevel() Level { - std.mu.Lock() - defer std.mu.Unlock() - return std.level() + return std.GetLevel() +} + +// IsLevelEnabled checks if the log level of the standard logger is greater than the level param +func IsLevelEnabled(level Level) bool { + return std.IsLevelEnabled(level) } // AddHook adds a hook to the standard logger hooks. func AddHook(hook Hook) { - std.mu.Lock() - defer std.mu.Unlock() - std.Hooks.Add(hook) + std.AddHook(hook) } // WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key. @@ -53,6 +56,11 @@ func WithError(err error) *Entry { return std.WithField(ErrorKey, err) } +// WithContext creates an entry from the standard logger and adds a context to it. +func WithContext(ctx context.Context) *Entry { + return std.WithContext(ctx) +} + // WithField creates an entry from the standard logger and adds a field to // it. If you want multiple fields, use `WithFields`. // @@ -72,6 +80,20 @@ func WithFields(fields Fields) *Entry { return std.WithFields(fields) } +// WithTime creats an entry from the standard logger and overrides the time of +// logs generated with it. +// +// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal +// or Panic on the Entry it returns. +func WithTime(t time.Time) *Entry { + return std.WithTime(t) +} + +// Trace logs a message at level Trace on the standard logger. +func Trace(args ...interface{}) { + std.Trace(args...) +} + // Debug logs a message at level Debug on the standard logger. func Debug(args ...interface{}) { std.Debug(args...) @@ -107,11 +129,16 @@ func Panic(args ...interface{}) { std.Panic(args...) } -// Fatal logs a message at level Fatal on the standard logger. +// Fatal logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatal(args ...interface{}) { std.Fatal(args...) } +// Tracef logs a message at level Trace on the standard logger. +func Tracef(format string, args ...interface{}) { + std.Tracef(format, args...) +} + // Debugf logs a message at level Debug on the standard logger. func Debugf(format string, args ...interface{}) { std.Debugf(format, args...) 
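For context on what the upgraded API looks like from calling code, the sketch below exercises the main additions this vendor bump brings in (TraceLevel, SetReportCaller, WithTime, IsLevelEnabled, JSONFormatter.PrettyPrint, and the Logger.ExitFunc hook that Fatal now goes through). It is an illustrative example only, not part of the vendored diff; the package name, the log messages, and the expensiveComputation helper are made up for the sketch.

package main

import (
	"time"

	"github.com/sirupsen/logrus"
)

// expensiveComputation stands in for work that should be skipped when debug
// logging is disabled; it is purely illustrative.
func expensiveComputation() int { return 42 }

func main() {
	// TraceLevel did not exist in v1.0.3; enable it so Trace/Tracef/Traceln emit.
	logrus.SetLevel(logrus.TraceLevel)

	// SetReportCaller adds the calling function and file to every entry
	// (rendered via FieldKeyFunc / FieldKeyFile in the formatters).
	logrus.SetReportCaller(true)

	// PrettyPrint indents the JSON output; it is also new in this version.
	logrus.SetFormatter(&logrus.JSONFormatter{PrettyPrint: true})

	logrus.Trace("finer-grained than Debug")

	// WithTime overrides the entry timestamp instead of using time.Now().
	logrus.WithTime(time.Date(2019, 7, 1, 0, 0, 0, 0, time.UTC)).
		Info("entry with an explicit timestamp")

	// IsLevelEnabled lets callers avoid building expensive arguments.
	if logrus.IsLevelEnabled(logrus.DebugLevel) {
		logrus.Debugf("debug detail: %d", expensiveComputation())
	}

	// Fatal now exits through Logger.Exit, which calls the configurable
	// ExitFunc (default os.Exit) after running registered exit handlers,
	// rather than the old package-level Exit(1).
	testLogger := logrus.New()
	testLogger.ExitFunc = func(code int) { /* swallowed for the example */ }
	testLogger.Fatal("logged, but the process keeps running here")
}

The last two lines rely on the Logger.Exit/ExitFunc change shown in logger.go further below; with the default ExitFunc the Fatal call would terminate the process with status 1.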
@@ -147,11 +174,16 @@ func Panicf(format string, args ...interface{}) { std.Panicf(format, args...) } -// Fatalf logs a message at level Fatal on the standard logger. +// Fatalf logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatalf(format string, args ...interface{}) { std.Fatalf(format, args...) } +// Traceln logs a message at level Trace on the standard logger. +func Traceln(args ...interface{}) { + std.Traceln(args...) +} + // Debugln logs a message at level Debug on the standard logger. func Debugln(args ...interface{}) { std.Debugln(args...) @@ -187,7 +219,7 @@ func Panicln(args ...interface{}) { std.Panicln(args...) } -// Fatalln logs a message at level Fatal on the standard logger. +// Fatalln logs a message at level Fatal on the standard logger then the process will exit with status set to 1. func Fatalln(args ...interface{}) { std.Fatalln(args...) } diff --git a/vendor/github.com/sirupsen/logrus/formatter.go b/vendor/github.com/sirupsen/logrus/formatter.go index b183ff5b..40888377 100644 --- a/vendor/github.com/sirupsen/logrus/formatter.go +++ b/vendor/github.com/sirupsen/logrus/formatter.go @@ -2,7 +2,16 @@ package logrus import "time" -const defaultTimestampFormat = time.RFC3339 +// Default key names for the default fields +const ( + defaultTimestampFormat = time.RFC3339 + FieldKeyMsg = "msg" + FieldKeyLevel = "level" + FieldKeyTime = "time" + FieldKeyLogrusError = "logrus_error" + FieldKeyFunc = "func" + FieldKeyFile = "file" +) // The Formatter interface is used to implement a custom Formatter. It takes an // `Entry`. It exposes all the fields, including the default ones: @@ -18,7 +27,7 @@ type Formatter interface { Format(*Entry) ([]byte, error) } -// This is to not silently overwrite `time`, `msg` and `level` fields when +// This is to not silently overwrite `time`, `msg`, `func` and `level` fields when // dumping it. If this code wasn't there doing: // // logrus.WithField("level", 1).Info("hello") @@ -30,16 +39,40 @@ type Formatter interface { // // It's not exported because it's still using Data in an opinionated way. It's to // avoid code duplication between the two default formatters. -func prefixFieldClashes(data Fields) { - if t, ok := data["time"]; ok { - data["fields.time"] = t +func prefixFieldClashes(data Fields, fieldMap FieldMap, reportCaller bool) { + timeKey := fieldMap.resolve(FieldKeyTime) + if t, ok := data[timeKey]; ok { + data["fields."+timeKey] = t + delete(data, timeKey) } - if m, ok := data["msg"]; ok { - data["fields.msg"] = m + msgKey := fieldMap.resolve(FieldKeyMsg) + if m, ok := data[msgKey]; ok { + data["fields."+msgKey] = m + delete(data, msgKey) } - if l, ok := data["level"]; ok { - data["fields.level"] = l + levelKey := fieldMap.resolve(FieldKeyLevel) + if l, ok := data[levelKey]; ok { + data["fields."+levelKey] = l + delete(data, levelKey) + } + + logrusErrKey := fieldMap.resolve(FieldKeyLogrusError) + if l, ok := data[logrusErrKey]; ok { + data["fields."+logrusErrKey] = l + delete(data, logrusErrKey) + } + + // If reportCaller is not set, 'func' will not conflict. 
+ if reportCaller { + funcKey := fieldMap.resolve(FieldKeyFunc) + if l, ok := data[funcKey]; ok { + data["fields."+funcKey] = l + } + fileKey := fieldMap.resolve(FieldKeyFile) + if l, ok := data[fileKey]; ok { + data["fields."+fileKey] = l + } } } diff --git a/vendor/github.com/sirupsen/logrus/go.mod b/vendor/github.com/sirupsen/logrus/go.mod new file mode 100644 index 00000000..12fdf989 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/go.mod @@ -0,0 +1,10 @@ +module github.com/sirupsen/logrus + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/konsorten/go-windows-terminal-sequences v1.0.1 + github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/stretchr/objx v0.1.1 // indirect + github.com/stretchr/testify v1.2.2 + golang.org/x/sys v0.0.0-20190422165155-953cdadca894 +) diff --git a/vendor/github.com/sirupsen/logrus/go.sum b/vendor/github.com/sirupsen/logrus/go.sum new file mode 100644 index 00000000..596c318b --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/go.sum @@ -0,0 +1,16 @@ +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe h1:CHRGQ8V7OlCYtwaKPJi3iA7J+YdNKdo8j7nG5IgDhjs= +github.com/konsorten/go-windows-terminal-sequences v0.0.0-20180402223658-b729f2633dfe/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33 h1:I6FyU15t786LL7oL/hn43zqTuEGr4PN7F4XJ1p4E3Y8= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/vendor/github.com/sirupsen/logrus/json_formatter.go b/vendor/github.com/sirupsen/logrus/json_formatter.go index fb01c1b1..098a21a0 100644 --- a/vendor/github.com/sirupsen/logrus/json_formatter.go +++ b/vendor/github.com/sirupsen/logrus/json_formatter.go @@ -1,8 +1,10 @@ package logrus import ( + "bytes" "encoding/json" "fmt" + "runtime" ) type fieldKey string @@ -10,13 +12,6 @@ type fieldKey string // FieldMap allows customization of the key names for default fields. 
type FieldMap map[fieldKey]string -// Default key names for the default fields -const ( - FieldKeyMsg = "msg" - FieldKeyLevel = "level" - FieldKeyTime = "time" -) - func (f FieldMap) resolve(key fieldKey) string { if k, ok := f[key]; ok { return k @@ -33,21 +28,34 @@ type JSONFormatter struct { // DisableTimestamp allows disabling automatic timestamps in output DisableTimestamp bool + // DataKey allows users to put all the log entry parameters into a nested dictionary at a given key. + DataKey string + // FieldMap allows users to customize the names of keys for default fields. // As an example: // formatter := &JSONFormatter{ // FieldMap: FieldMap{ - // FieldKeyTime: "@timestamp", + // FieldKeyTime: "@timestamp", // FieldKeyLevel: "@level", - // FieldKeyMsg: "@message", + // FieldKeyMsg: "@message", + // FieldKeyFunc: "@caller", // }, // } FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the json data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from json fields. + CallerPrettyfier func(*runtime.Frame) (function string, file string) + + // PrettyPrint will indent all json logs + PrettyPrint bool } // Format renders a single log entry func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { - data := make(Fields, len(entry.Data)+3) + data := make(Fields, len(entry.Data)+4) for k, v := range entry.Data { switch v := v.(type) { case error: @@ -58,22 +66,56 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) { data[k] = v } } - prefixFieldClashes(data) + + if f.DataKey != "" { + newData := make(Fields, 4) + newData[f.DataKey] = data + data = newData + } + + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) timestampFormat := f.TimestampFormat if timestampFormat == "" { timestampFormat = defaultTimestampFormat } + if entry.err != "" { + data[f.FieldMap.resolve(FieldKeyLogrusError)] = entry.err + } if !f.DisableTimestamp { data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat) } data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String() - - serialized, err := json.Marshal(data) - if err != nil { - return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err) + if entry.HasCaller() { + funcVal := entry.Caller.Function + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + if funcVal != "" { + data[f.FieldMap.resolve(FieldKeyFunc)] = funcVal + } + if fileVal != "" { + data[f.FieldMap.resolve(FieldKeyFile)] = fileVal + } } - return append(serialized, '\n'), nil + + var b *bytes.Buffer + if entry.Buffer != nil { + b = entry.Buffer + } else { + b = &bytes.Buffer{} + } + + encoder := json.NewEncoder(b) + if f.PrettyPrint { + encoder.SetIndent("", " ") + } + if err := encoder.Encode(data); err != nil { + return nil, fmt.Errorf("failed to marshal fields to JSON, %v", err) + } + + return b.Bytes(), nil } diff --git a/vendor/github.com/sirupsen/logrus/logger.go b/vendor/github.com/sirupsen/logrus/logger.go index 2acab050..c0c0b1e5 100644 --- a/vendor/github.com/sirupsen/logrus/logger.go +++ b/vendor/github.com/sirupsen/logrus/logger.go @@ -1,16 +1,18 @@ package logrus import ( + "context" "io" "os" "sync" "sync/atomic" + "time" ) type Logger struct { // The logs are `io.Copy`'d to this in a mutex. 
It's common to set this to a // file, or leave it default which is `os.Stderr`. You can also set this to - // something more adventorous, such as logging to Kafka. + // something more adventurous, such as logging to Kafka. Out io.Writer // Hooks for the logger instance. These allow firing events based on logging // levels and log entries. For example, to send errors to an error tracking @@ -23,6 +25,10 @@ type Logger struct { // own that implements the `Formatter` interface, see the `README` or included // formatters for examples. Formatter Formatter + + // Flag for whether to log caller info (off by default) + ReportCaller bool + // The logging level the logger should log at. This is typically (and defaults // to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be // logged. @@ -31,8 +37,12 @@ type Logger struct { mu MutexWrap // Reusable empty entry entryPool sync.Pool + // Function to exit the application, defaults to `os.Exit()` + ExitFunc exitFunc } +type exitFunc func(int) + type MutexWrap struct { lock sync.Mutex disabled bool @@ -68,10 +78,12 @@ func (mw *MutexWrap) Disable() { // It's recommended to make this a global instance called `log`. func New() *Logger { return &Logger{ - Out: os.Stderr, - Formatter: new(TextFormatter), - Hooks: make(LevelHooks), - Level: InfoLevel, + Out: os.Stderr, + Formatter: new(TextFormatter), + Hooks: make(LevelHooks), + Level: InfoLevel, + ExitFunc: os.Exit, + ReportCaller: false, } } @@ -84,11 +96,12 @@ func (logger *Logger) newEntry() *Entry { } func (logger *Logger) releaseEntry(entry *Entry) { + entry.Data = map[string]interface{}{} logger.entryPool.Put(entry) } // Adds a field to the log entry, note that it doesn't log until you call -// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry. +// Debug, Print, Info, Warn, Error, Fatal or Panic. It only creates a log entry. // If you want multiple fields, use `WithFields`. func (logger *Logger) WithField(key string, value interface{}) *Entry { entry := logger.newEntry() @@ -112,20 +125,38 @@ func (logger *Logger) WithError(err error) *Entry { return entry.WithError(err) } -func (logger *Logger) Debugf(format string, args ...interface{}) { - if logger.level() >= DebugLevel { +// Add a context to the log entry. +func (logger *Logger) WithContext(ctx context.Context) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithContext(ctx) +} + +// Overrides the time of the log entry. +func (logger *Logger) WithTime(t time.Time) *Entry { + entry := logger.newEntry() + defer logger.releaseEntry(entry) + return entry.WithTime(t) +} + +func (logger *Logger) Logf(level Level, format string, args ...interface{}) { + if logger.IsLevelEnabled(level) { entry := logger.newEntry() - entry.Debugf(format, args...) + entry.Logf(level, format, args...) logger.releaseEntry(entry) } } +func (logger *Logger) Tracef(format string, args ...interface{}) { + logger.Logf(TraceLevel, format, args...) +} + +func (logger *Logger) Debugf(format string, args ...interface{}) { + logger.Logf(DebugLevel, format, args...) +} + func (logger *Logger) Infof(format string, args ...interface{}) { - if logger.level() >= InfoLevel { - entry := logger.newEntry() - entry.Infof(format, args...) - logger.releaseEntry(entry) - } + logger.Logf(InfoLevel, format, args...) 
} func (logger *Logger) Printf(format string, args ...interface{}) { @@ -135,123 +166,91 @@ func (logger *Logger) Printf(format string, args ...interface{}) { } func (logger *Logger) Warnf(format string, args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warnf(format, args...) - logger.releaseEntry(entry) - } + logger.Logf(WarnLevel, format, args...) } func (logger *Logger) Warningf(format string, args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warnf(format, args...) - logger.releaseEntry(entry) - } + logger.Warnf(format, args...) } func (logger *Logger) Errorf(format string, args ...interface{}) { - if logger.level() >= ErrorLevel { - entry := logger.newEntry() - entry.Errorf(format, args...) - logger.releaseEntry(entry) - } + logger.Logf(ErrorLevel, format, args...) } func (logger *Logger) Fatalf(format string, args ...interface{}) { - if logger.level() >= FatalLevel { - entry := logger.newEntry() - entry.Fatalf(format, args...) - logger.releaseEntry(entry) - } - Exit(1) + logger.Logf(FatalLevel, format, args...) + logger.Exit(1) } func (logger *Logger) Panicf(format string, args ...interface{}) { - if logger.level() >= PanicLevel { + logger.Logf(PanicLevel, format, args...) +} + +func (logger *Logger) Log(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { entry := logger.newEntry() - entry.Panicf(format, args...) + entry.Log(level, args...) logger.releaseEntry(entry) } } +func (logger *Logger) Trace(args ...interface{}) { + logger.Log(TraceLevel, args...) +} + func (logger *Logger) Debug(args ...interface{}) { - if logger.level() >= DebugLevel { - entry := logger.newEntry() - entry.Debug(args...) - logger.releaseEntry(entry) - } + logger.Log(DebugLevel, args...) } func (logger *Logger) Info(args ...interface{}) { - if logger.level() >= InfoLevel { - entry := logger.newEntry() - entry.Info(args...) - logger.releaseEntry(entry) - } + logger.Log(InfoLevel, args...) } func (logger *Logger) Print(args ...interface{}) { entry := logger.newEntry() - entry.Info(args...) + entry.Print(args...) logger.releaseEntry(entry) } func (logger *Logger) Warn(args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warn(args...) - logger.releaseEntry(entry) - } + logger.Log(WarnLevel, args...) } func (logger *Logger) Warning(args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warn(args...) - logger.releaseEntry(entry) - } + logger.Warn(args...) } func (logger *Logger) Error(args ...interface{}) { - if logger.level() >= ErrorLevel { - entry := logger.newEntry() - entry.Error(args...) - logger.releaseEntry(entry) - } + logger.Log(ErrorLevel, args...) } func (logger *Logger) Fatal(args ...interface{}) { - if logger.level() >= FatalLevel { - entry := logger.newEntry() - entry.Fatal(args...) - logger.releaseEntry(entry) - } - Exit(1) + logger.Log(FatalLevel, args...) + logger.Exit(1) } func (logger *Logger) Panic(args ...interface{}) { - if logger.level() >= PanicLevel { + logger.Log(PanicLevel, args...) +} + +func (logger *Logger) Logln(level Level, args ...interface{}) { + if logger.IsLevelEnabled(level) { entry := logger.newEntry() - entry.Panic(args...) + entry.Logln(level, args...) logger.releaseEntry(entry) } } +func (logger *Logger) Traceln(args ...interface{}) { + logger.Logln(TraceLevel, args...) 
+} + func (logger *Logger) Debugln(args ...interface{}) { - if logger.level() >= DebugLevel { - entry := logger.newEntry() - entry.Debugln(args...) - logger.releaseEntry(entry) - } + logger.Logln(DebugLevel, args...) } func (logger *Logger) Infoln(args ...interface{}) { - if logger.level() >= InfoLevel { - entry := logger.newEntry() - entry.Infoln(args...) - logger.releaseEntry(entry) - } + logger.Logln(InfoLevel, args...) } func (logger *Logger) Println(args ...interface{}) { @@ -261,44 +260,32 @@ func (logger *Logger) Println(args ...interface{}) { } func (logger *Logger) Warnln(args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warnln(args...) - logger.releaseEntry(entry) - } + logger.Logln(WarnLevel, args...) } func (logger *Logger) Warningln(args ...interface{}) { - if logger.level() >= WarnLevel { - entry := logger.newEntry() - entry.Warnln(args...) - logger.releaseEntry(entry) - } + logger.Warnln(args...) } func (logger *Logger) Errorln(args ...interface{}) { - if logger.level() >= ErrorLevel { - entry := logger.newEntry() - entry.Errorln(args...) - logger.releaseEntry(entry) - } + logger.Logln(ErrorLevel, args...) } func (logger *Logger) Fatalln(args ...interface{}) { - if logger.level() >= FatalLevel { - entry := logger.newEntry() - entry.Fatalln(args...) - logger.releaseEntry(entry) - } - Exit(1) + logger.Logln(FatalLevel, args...) + logger.Exit(1) } func (logger *Logger) Panicln(args ...interface{}) { - if logger.level() >= PanicLevel { - entry := logger.newEntry() - entry.Panicln(args...) - logger.releaseEntry(entry) + logger.Logln(PanicLevel, args...) +} + +func (logger *Logger) Exit(code int) { + runHandlers() + if logger.ExitFunc == nil { + logger.ExitFunc = os.Exit } + logger.ExitFunc(code) } //When file is opened with appending mode, it's safe to @@ -312,6 +299,53 @@ func (logger *Logger) level() Level { return Level(atomic.LoadUint32((*uint32)(&logger.Level))) } +// SetLevel sets the logger level. func (logger *Logger) SetLevel(level Level) { atomic.StoreUint32((*uint32)(&logger.Level), uint32(level)) } + +// GetLevel returns the logger level. +func (logger *Logger) GetLevel() Level { + return logger.level() +} + +// AddHook adds a hook to the logger hooks. +func (logger *Logger) AddHook(hook Hook) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Hooks.Add(hook) +} + +// IsLevelEnabled checks if the log level of the logger is greater than the level param +func (logger *Logger) IsLevelEnabled(level Level) bool { + return logger.level() >= level +} + +// SetFormatter sets the logger formatter. +func (logger *Logger) SetFormatter(formatter Formatter) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Formatter = formatter +} + +// SetOutput sets the logger output. 
+func (logger *Logger) SetOutput(output io.Writer) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.Out = output +} + +func (logger *Logger) SetReportCaller(reportCaller bool) { + logger.mu.Lock() + defer logger.mu.Unlock() + logger.ReportCaller = reportCaller +} + +// ReplaceHooks replaces the logger hooks and returns the old ones +func (logger *Logger) ReplaceHooks(hooks LevelHooks) LevelHooks { + logger.mu.Lock() + oldHooks := logger.Hooks + logger.Hooks = hooks + logger.mu.Unlock() + return oldHooks +} diff --git a/vendor/github.com/sirupsen/logrus/logrus.go b/vendor/github.com/sirupsen/logrus/logrus.go index dd389997..8644761f 100644 --- a/vendor/github.com/sirupsen/logrus/logrus.go +++ b/vendor/github.com/sirupsen/logrus/logrus.go @@ -14,22 +14,11 @@ type Level uint32 // Convert the Level to a string. E.g. PanicLevel becomes "panic". func (level Level) String() string { - switch level { - case DebugLevel: - return "debug" - case InfoLevel: - return "info" - case WarnLevel: - return "warning" - case ErrorLevel: - return "error" - case FatalLevel: - return "fatal" - case PanicLevel: - return "panic" + if b, err := level.MarshalText(); err == nil { + return string(b) + } else { + return "unknown" } - - return "unknown" } // ParseLevel takes a string level and returns the Logrus log level constant. @@ -47,12 +36,47 @@ func ParseLevel(lvl string) (Level, error) { return InfoLevel, nil case "debug": return DebugLevel, nil + case "trace": + return TraceLevel, nil } var l Level return l, fmt.Errorf("not a valid logrus Level: %q", lvl) } +// UnmarshalText implements encoding.TextUnmarshaler. +func (level *Level) UnmarshalText(text []byte) error { + l, err := ParseLevel(string(text)) + if err != nil { + return err + } + + *level = Level(l) + + return nil +} + +func (level Level) MarshalText() ([]byte, error) { + switch level { + case TraceLevel: + return []byte("trace"), nil + case DebugLevel: + return []byte("debug"), nil + case InfoLevel: + return []byte("info"), nil + case WarnLevel: + return []byte("warning"), nil + case ErrorLevel: + return []byte("error"), nil + case FatalLevel: + return []byte("fatal"), nil + case PanicLevel: + return []byte("panic"), nil + } + + return nil, fmt.Errorf("not a valid logrus level %d", level) +} + // A constant exposing all logging levels var AllLevels = []Level{ PanicLevel, @@ -61,6 +85,7 @@ var AllLevels = []Level{ WarnLevel, InfoLevel, DebugLevel, + TraceLevel, } // These are the different logging levels. You can set the logging level to log @@ -69,7 +94,7 @@ const ( // PanicLevel level, highest level of severity. Logs and then calls panic with the // message passed to Debug, Info, ... PanicLevel Level = iota - // FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the + // FatalLevel level. Logs and then calls `logger.Exit(1)`. It will exit even if the // logging level is set to Panic. FatalLevel // ErrorLevel level. Logs. Used for errors that should definitely be noted. @@ -82,6 +107,8 @@ const ( InfoLevel // DebugLevel level. Usually only enabled when debugging. Very verbose logging. DebugLevel + // TraceLevel level. Designates finer-grained informational events than the Debug. 
+ TraceLevel ) // Won't compile if StdLogger can't be realized by a log.Logger @@ -140,4 +167,20 @@ type FieldLogger interface { Errorln(args ...interface{}) Fatalln(args ...interface{}) Panicln(args ...interface{}) + + // IsDebugEnabled() bool + // IsInfoEnabled() bool + // IsWarnEnabled() bool + // IsErrorEnabled() bool + // IsFatalEnabled() bool + // IsPanicEnabled() bool +} + +// Ext1FieldLogger (the first extension to FieldLogger) is superfluous, it is +// here for consistancy. Do not use. Use Logger or Entry instead. +type Ext1FieldLogger interface { + FieldLogger + Tracef(format string, args ...interface{}) + Trace(args ...interface{}) + Traceln(args ...interface{}) } diff --git a/vendor/github.com/sirupsen/logrus/terminal_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_bsd.go deleted file mode 100644 index d7b3893f..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_bsd.go +++ /dev/null @@ -1,10 +0,0 @@ -// +build darwin freebsd openbsd netbsd dragonfly -// +build !appengine - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TIOCGETA - -type Termios unix.Termios diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go new file mode 100644 index 00000000..2403de98 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_appengine.go @@ -0,0 +1,11 @@ +// +build appengine + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return true +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go new file mode 100644 index 00000000..3c4f43f9 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_bsd.go @@ -0,0 +1,13 @@ +// +build darwin dragonfly freebsd netbsd openbsd + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TIOCGETA + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} + diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go new file mode 100644 index 00000000..97af92c6 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_no_terminal.go @@ -0,0 +1,11 @@ +// +build js nacl plan9 + +package logrus + +import ( + "io" +) + +func checkIfTerminal(w io.Writer) bool { + return false +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go new file mode 100644 index 00000000..3293fb3c --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_notappengine.go @@ -0,0 +1,17 @@ +// +build !appengine,!js,!windows,!nacl,!plan9 + +package logrus + +import ( + "io" + "os" +) + +func checkIfTerminal(w io.Writer) bool { + switch v := w.(type) { + case *os.File: + return isTerminal(int(v.Fd())) + default: + return false + } +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go new file mode 100644 index 00000000..f6710b3b --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_solaris.go @@ -0,0 +1,11 @@ +package logrus + +import ( + "golang.org/x/sys/unix" +) + +// IsTerminal returns true if the given file descriptor is a terminal. 
+func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermio(fd, unix.TCGETA) + return err == nil +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_unix.go b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go new file mode 100644 index 00000000..355dc966 --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_unix.go @@ -0,0 +1,13 @@ +// +build linux aix + +package logrus + +import "golang.org/x/sys/unix" + +const ioctlReadTermios = unix.TCGETS + +func isTerminal(fd int) bool { + _, err := unix.IoctlGetTermios(fd, ioctlReadTermios) + return err == nil +} + diff --git a/vendor/github.com/sirupsen/logrus/terminal_check_windows.go b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go new file mode 100644 index 00000000..572889db --- /dev/null +++ b/vendor/github.com/sirupsen/logrus/terminal_check_windows.go @@ -0,0 +1,34 @@ +// +build !appengine,!js,windows + +package logrus + +import ( + "io" + "os" + "syscall" + + sequences "github.com/konsorten/go-windows-terminal-sequences" +) + +func initTerminal(w io.Writer) { + switch v := w.(type) { + case *os.File: + sequences.EnableVirtualTerminalProcessing(syscall.Handle(v.Fd()), true) + } +} + +func checkIfTerminal(w io.Writer) bool { + var ret bool + switch v := w.(type) { + case *os.File: + var mode uint32 + err := syscall.GetConsoleMode(syscall.Handle(v.Fd()), &mode) + ret = (err == nil) + default: + ret = false + } + if ret { + initTerminal(w) + } + return ret +} diff --git a/vendor/github.com/sirupsen/logrus/terminal_linux.go b/vendor/github.com/sirupsen/logrus/terminal_linux.go deleted file mode 100644 index 88d7298e..00000000 --- a/vendor/github.com/sirupsen/logrus/terminal_linux.go +++ /dev/null @@ -1,14 +0,0 @@ -// Based on ssh/terminal: -// Copyright 2013 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !appengine - -package logrus - -import "golang.org/x/sys/unix" - -const ioctlReadTermios = unix.TCGETS - -type Termios unix.Termios diff --git a/vendor/github.com/sirupsen/logrus/text_formatter.go b/vendor/github.com/sirupsen/logrus/text_formatter.go index be412aa9..e01587c4 100644 --- a/vendor/github.com/sirupsen/logrus/text_formatter.go +++ b/vendor/github.com/sirupsen/logrus/text_formatter.go @@ -3,28 +3,22 @@ package logrus import ( "bytes" "fmt" - "io" "os" + "runtime" "sort" "strings" "sync" "time" - - "golang.org/x/crypto/ssh/terminal" ) const ( - nocolor = 0 - red = 31 - green = 32 - yellow = 33 - blue = 36 - gray = 37 + red = 31 + yellow = 33 + blue = 36 + gray = 37 ) -var ( - baseTimestamp time.Time -) +var baseTimestamp time.Time func init() { baseTimestamp = time.Now() @@ -38,6 +32,9 @@ type TextFormatter struct { // Force disabling colors. DisableColors bool + // Override coloring based on CLICOLOR and CLICOLOR_FORCE. - https://bixense.com/clicolors/ + EnvironmentOverrideColors bool + // Disable timestamp logging. useful when output is redirected to logging // system that already adds timestamps. DisableTimestamp bool @@ -54,69 +51,151 @@ type TextFormatter struct { // be desired. DisableSorting bool + // The keys sorting function, when uninitialized it uses sort.Strings. + SortingFunc func([]string) + + // Disables the truncation of the level text to 4 characters. 
+ DisableLevelTruncation bool + // QuoteEmptyFields will wrap empty fields in quotes if true QuoteEmptyFields bool // Whether the logger's out is to a terminal isTerminal bool - sync.Once + // FieldMap allows users to customize the names of keys for default fields. + // As an example: + // formatter := &TextFormatter{ + // FieldMap: FieldMap{ + // FieldKeyTime: "@timestamp", + // FieldKeyLevel: "@level", + // FieldKeyMsg: "@message"}} + FieldMap FieldMap + + // CallerPrettyfier can be set by the user to modify the content + // of the function and file keys in the data when ReportCaller is + // activated. If any of the returned value is the empty string the + // corresponding key will be removed from fields. + CallerPrettyfier func(*runtime.Frame) (function string, file string) + + terminalInitOnce sync.Once } func (f *TextFormatter) init(entry *Entry) { if entry.Logger != nil { - f.isTerminal = f.checkIfTerminal(entry.Logger.Out) + f.isTerminal = checkIfTerminal(entry.Logger.Out) } } -func (f *TextFormatter) checkIfTerminal(w io.Writer) bool { - switch v := w.(type) { - case *os.File: - return terminal.IsTerminal(int(v.Fd())) - default: - return false +func (f *TextFormatter) isColored() bool { + isColored := f.ForceColors || (f.isTerminal && (runtime.GOOS != "windows")) + + if f.EnvironmentOverrideColors { + if force, ok := os.LookupEnv("CLICOLOR_FORCE"); ok && force != "0" { + isColored = true + } else if ok && force == "0" { + isColored = false + } else if os.Getenv("CLICOLOR") == "0" { + isColored = false + } } + + return isColored && !f.DisableColors } // Format renders a single log entry func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { - var b *bytes.Buffer - keys := make([]string, 0, len(entry.Data)) - for k := range entry.Data { + data := make(Fields) + for k, v := range entry.Data { + data[k] = v + } + prefixFieldClashes(data, f.FieldMap, entry.HasCaller()) + keys := make([]string, 0, len(data)) + for k := range data { keys = append(keys, k) } - if !f.DisableSorting { - sort.Strings(keys) + var funcVal, fileVal string + + fixedKeys := make([]string, 0, 4+len(data)) + if !f.DisableTimestamp { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyTime)) } + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLevel)) + if entry.Message != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyMsg)) + } + if entry.err != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyLogrusError)) + } + if entry.HasCaller() { + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } else { + funcVal = entry.Caller.Function + fileVal = fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + } + + if funcVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFunc)) + } + if fileVal != "" { + fixedKeys = append(fixedKeys, f.FieldMap.resolve(FieldKeyFile)) + } + } + + if !f.DisableSorting { + if f.SortingFunc == nil { + sort.Strings(keys) + fixedKeys = append(fixedKeys, keys...) + } else { + if !f.isColored() { + fixedKeys = append(fixedKeys, keys...) + f.SortingFunc(fixedKeys) + } else { + f.SortingFunc(keys) + } + } + } else { + fixedKeys = append(fixedKeys, keys...) 
+ } + + var b *bytes.Buffer if entry.Buffer != nil { b = entry.Buffer } else { b = &bytes.Buffer{} } - prefixFieldClashes(entry.Data) - - f.Do(func() { f.init(entry) }) - - isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors + f.terminalInitOnce.Do(func() { f.init(entry) }) timestampFormat := f.TimestampFormat if timestampFormat == "" { timestampFormat = defaultTimestampFormat } - if isColored { - f.printColored(b, entry, keys, timestampFormat) + if f.isColored() { + f.printColored(b, entry, keys, data, timestampFormat) } else { - if !f.DisableTimestamp { - f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat)) - } - f.appendKeyValue(b, "level", entry.Level.String()) - if entry.Message != "" { - f.appendKeyValue(b, "msg", entry.Message) - } - for _, key := range keys { - f.appendKeyValue(b, key, entry.Data[key]) + + for _, key := range fixedKeys { + var value interface{} + switch { + case key == f.FieldMap.resolve(FieldKeyTime): + value = entry.Time.Format(timestampFormat) + case key == f.FieldMap.resolve(FieldKeyLevel): + value = entry.Level.String() + case key == f.FieldMap.resolve(FieldKeyMsg): + value = entry.Message + case key == f.FieldMap.resolve(FieldKeyLogrusError): + value = entry.err + case key == f.FieldMap.resolve(FieldKeyFunc) && entry.HasCaller(): + value = funcVal + case key == f.FieldMap.resolve(FieldKeyFile) && entry.HasCaller(): + value = fileVal + default: + value = data[key] + } + f.appendKeyValue(b, key, value) } } @@ -124,10 +203,10 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) { return b.Bytes(), nil } -func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) { +func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, data Fields, timestampFormat string) { var levelColor int switch entry.Level { - case DebugLevel: + case DebugLevel, TraceLevel: levelColor = gray case WarnLevel: levelColor = yellow @@ -137,17 +216,42 @@ func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []strin levelColor = blue } - levelText := strings.ToUpper(entry.Level.String())[0:4] + levelText := strings.ToUpper(entry.Level.String()) + if !f.DisableLevelTruncation { + levelText = levelText[0:4] + } + + // Remove a single newline if it already exists in the message to keep + // the behavior of logrus text_formatter the same as the stdlib log package + entry.Message = strings.TrimSuffix(entry.Message, "\n") + + caller := "" + if entry.HasCaller() { + funcVal := fmt.Sprintf("%s()", entry.Caller.Function) + fileVal := fmt.Sprintf("%s:%d", entry.Caller.File, entry.Caller.Line) + + if f.CallerPrettyfier != nil { + funcVal, fileVal = f.CallerPrettyfier(entry.Caller) + } + + if fileVal == "" { + caller = funcVal + } else if funcVal == "" { + caller = fileVal + } else { + caller = fileVal + " " + funcVal + } + } if f.DisableTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message) + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m%s %-44s ", levelColor, levelText, caller, entry.Message) } else if !f.FullTimestamp { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message) + fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d]%s %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), caller, entry.Message) } else { - fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message) + fmt.Fprintf(b, 
"\x1b[%dm%s\x1b[0m[%s]%s %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), caller, entry.Message) } for _, k := range keys { - v := entry.Data[k] + v := data[k] fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k) f.appendValue(b, v) } diff --git a/vendor/github.com/sirupsen/logrus/writer.go b/vendor/github.com/sirupsen/logrus/writer.go index 7bdebedc..9e1f7513 100644 --- a/vendor/github.com/sirupsen/logrus/writer.go +++ b/vendor/github.com/sirupsen/logrus/writer.go @@ -24,6 +24,8 @@ func (entry *Entry) WriterLevel(level Level) *io.PipeWriter { var printFunc func(args ...interface{}) switch level { + case TraceLevel: + printFunc = entry.Trace case DebugLevel: printFunc = entry.Debug case InfoLevel: