TUN-3470: Replace in-house logger calls with zerolog
parent 06404bf3e8
commit 870f5fa907
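The diff below applies one mechanical translation across the codebase: values of the in-house `logger.Service` interface become `*zerolog.Logger` pointers, and each leveled printf-style call is rewritten in zerolog's event-chaining style. A minimal sketch of that mapping, using a hypothetical `connect` helper rather than any function from the diff:

```go
package example

import (
	"os"

	"github.com/rs/zerolog"
)

// connect is a hypothetical helper; it only illustrates the call-site mapping
// used throughout the diff:
//   logger.Infof(...)  -> log.Info().Msgf(...)
//   logger.Errorf(...) -> log.Error().Msgf(...)
//   logger.Error(...)  -> log.Error().Msg(...)
//   logger.Debugf(...) -> log.Debug().Msgf(...)
func connect(addr string, log *zerolog.Logger) error {
	log.Info().Msgf("connecting to %s", addr)
	if err := dial(addr); err != nil {
		log.Error().Msgf("failed to connect to %s with error: %s", addr, err)
		return err
	}
	log.Debug().Msg("connected")
	return nil
}

// dial is a stand-in so the sketch compiles on its own.
func dial(addr string) error { return nil }

// A root logger is built once and handed down by pointer.
func example() error {
	log := zerolog.New(os.Stderr).With().Timestamp().Logger()
	return connect("origin.example.com:443", &log)
}
```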
@@ -12,8 +12,9 @@ import (
 
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/token"
 	"github.com/cloudflare/cloudflared/h2mux"
-	"github.com/cloudflare/cloudflared/logger"
+
 	"github.com/pkg/errors"
+	"github.com/rs/zerolog"
 )
 
 type StartOptions struct {
@@ -49,7 +50,7 @@ func (c *StdinoutStream) Write(p []byte) (int, error) {
 
 // Helper to allow defering the response close with a check that the resp is not nil
 func closeRespBody(resp *http.Response) {
 	if resp != nil {
-		resp.Body.Close()
+		_ = resp.Body.Close()
 	}
 }
 
@@ -103,7 +104,7 @@ func Serve(remoteConn Connection, listener net.Listener, shutdownC <-chan struct
 // serveConnection handles connections for the Serve() call
 func serveConnection(remoteConn Connection, c net.Conn, options *StartOptions) {
 	defer c.Close()
-	remoteConn.ServeStream(options, c)
+	_ = remoteConn.ServeStream(options, c)
 }
 
 // IsAccessResponse checks the http Response to see if the url location
@@ -125,13 +126,13 @@ func IsAccessResponse(resp *http.Response) bool {
 }
 
 // BuildAccessRequest builds an HTTP request with the Access token set
-func BuildAccessRequest(options *StartOptions, logger logger.Service) (*http.Request, error) {
+func BuildAccessRequest(options *StartOptions, log *zerolog.Logger) (*http.Request, error) {
 	req, err := http.NewRequest(http.MethodGet, options.OriginURL, nil)
 	if err != nil {
 		return nil, err
 	}
 
-	token, err := token.FetchTokenWithRedirect(req.URL, logger)
+	token, err := token.FetchTokenWithRedirect(req.URL, log)
 	if err != nil {
 		return nil, err
 	}
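Besides the logger swap, this commit also silences unchecked-error lints by explicitly discarding return values with the blank identifier, as in `_ = resp.Body.Close()` above. A small, self-contained illustration of that pattern (the buffer example is an assumption, not code from the diff):

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	var buf bytes.Buffer

	// Without the blank identifier, linters flag the ignored (n, err) results.
	// Assigning them to `_` documents that dropping them is intentional.
	_, _ = buf.WriteString("hello")

	fmt.Println(buf.String())
}
```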
@@ -9,8 +9,8 @@ import (
 	"sync"
 	"testing"
 
-	"github.com/cloudflare/cloudflared/logger"
 	ws "github.com/gorilla/websocket"
+	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -43,8 +43,8 @@ func (s *testStreamer) Write(p []byte) (int, error) {
 
 func TestStartClient(t *testing.T) {
 	message := "Good morning Austin! Time for another sunny day in the great state of Texas."
-	logger := logger.NewOutputWriter(logger.NewMockWriteManager())
-	wsConn := NewWSConnection(logger, false)
+	log := zerolog.Nop()
+	wsConn := NewWSConnection(&log, false)
 	ts := newTestWebSocketServer()
 	defer ts.Close()
 
@@ -55,10 +55,10 @@ func TestStartClient(t *testing.T) {
 	}
 	err := StartClient(wsConn, buf, options)
 	assert.NoError(t, err)
-	buf.Write([]byte(message))
+	_, _ = buf.Write([]byte(message))
 
 	readBuffer := make([]byte, len(message))
-	buf.Read(readBuffer)
+	_, _ = buf.Read(readBuffer)
 	assert.Equal(t, message, string(readBuffer))
 }
 
@@ -68,9 +68,9 @@ func TestStartServer(t *testing.T) {
 		t.Fatalf("Error starting listener: %v", err)
 	}
 	message := "Good morning Austin! Time for another sunny day in the great state of Texas."
-	logger := logger.NewOutputWriter(logger.NewMockWriteManager())
+	log := zerolog.Nop()
 	shutdownC := make(chan struct{})
-	wsConn := NewWSConnection(logger, false)
+	wsConn := NewWSConnection(&log, false)
 	ts := newTestWebSocketServer()
 	defer ts.Close()
 	options := &StartOptions{
@@ -86,10 +86,10 @@ func TestStartServer(t *testing.T) {
 	}()
 
 	conn, err := net.Dial("tcp", listener.Addr().String())
-	conn.Write([]byte(message))
+	_, _ = conn.Write([]byte(message))
 
 	readBuffer := make([]byte, len(message))
-	conn.Read(readBuffer)
+	_, _ = conn.Read(readBuffer)
 	assert.Equal(t, string(readBuffer), message)
 }
 
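In the tests above, the mock write manager is replaced by `zerolog.Nop()`, a logger that discards every event; because the new APIs take a `*zerolog.Logger`, the value is passed by address (`&log`). A minimal sketch under those assumptions, with a hypothetical `NewThing` constructor standing in for the real ones:

```go
package example_test

import (
	"testing"

	"github.com/rs/zerolog"
)

// Thing is a stand-in for a type that logs through an injected *zerolog.Logger.
type Thing struct{ log *zerolog.Logger }

func NewThing(log *zerolog.Logger) *Thing { return &Thing{log: log} }

func (t *Thing) Do() { t.log.Info().Msg("doing work") }

func TestThing(t *testing.T) {
	log := zerolog.Nop() // no-op logger: events are built but never written
	thing := NewThing(&log)
	thing.Do() // output stays silent, keeping test logs clean
}
```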
@@ -8,16 +8,17 @@ import (
 	"net/http/httputil"
 
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/token"
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/cloudflare/cloudflared/socks"
 	cfwebsocket "github.com/cloudflare/cloudflared/websocket"
 
 	"github.com/gorilla/websocket"
+	"github.com/rs/zerolog"
 )
 
 // Websocket is used to carry data via WS binary frames over the tunnel from client to the origin
 // This implements the functions for glider proxy (sock5) and the carrier interface
 type Websocket struct {
-	logger  logger.Service
+	log     *zerolog.Logger
 	isSocks bool
 }
 
@@ -36,9 +37,9 @@ func (d *wsdialer) Dial(address string) (io.ReadWriteCloser, *socks.AddrSpec, er
 }
 
 // NewWSConnection returns a new connection object
-func NewWSConnection(logger logger.Service, isSocks bool) Connection {
+func NewWSConnection(log *zerolog.Logger, isSocks bool) Connection {
 	return &Websocket{
-		logger:  logger,
+		log:     log,
 		isSocks: isSocks,
 	}
 }
@@ -46,9 +47,9 @@ func NewWSConnection(logger logger.Service, isSocks bool) Connection {
 // ServeStream will create a Websocket client stream connection to the edge
 // it blocks and writes the raw data from conn over the tunnel
 func (ws *Websocket) ServeStream(options *StartOptions, conn io.ReadWriter) error {
-	wsConn, err := createWebsocketStream(options, ws.logger)
+	wsConn, err := createWebsocketStream(options, ws.log)
 	if err != nil {
-		ws.logger.Errorf("failed to connect to %s with error: %s", options.OriginURL, err)
+		ws.log.Error().Msgf("failed to connect to %s with error: %s", options.OriginURL, err)
 		return err
 	}
 	defer wsConn.Close()
@@ -58,7 +59,7 @@ func (ws *Websocket) ServeStream(options *StartOptions, conn io.ReadWriter) erro
 		requestHandler := socks.NewRequestHandler(dialer)
 		socksServer := socks.NewConnectionHandler(requestHandler)
 
-		socksServer.Serve(conn)
+		_ = socksServer.Serve(conn)
 	} else {
 		cfwebsocket.Stream(wsConn, conn)
 	}
@@ -68,13 +69,13 @@ func (ws *Websocket) ServeStream(options *StartOptions, conn io.ReadWriter) erro
 // StartServer creates a Websocket server to listen for connections.
 // This is used on the origin (tunnel) side to take data from the muxer and send it to the origin
 func (ws *Websocket) StartServer(listener net.Listener, remote string, shutdownC <-chan struct{}) error {
-	return cfwebsocket.StartProxyServer(ws.logger, listener, remote, shutdownC, cfwebsocket.DefaultStreamHandler)
+	return cfwebsocket.StartProxyServer(ws.log, listener, remote, shutdownC, cfwebsocket.DefaultStreamHandler)
 }
 
 // createWebsocketStream will create a WebSocket connection to stream data over
 // It also handles redirects from Access and will present that flow if
 // the token is not present on the request
-func createWebsocketStream(options *StartOptions, logger logger.Service) (*cfwebsocket.Conn, error) {
+func createWebsocketStream(options *StartOptions, log *zerolog.Logger) (*cfwebsocket.Conn, error) {
 	req, err := http.NewRequest(http.MethodGet, options.OriginURL, nil)
 	if err != nil {
 		return nil, err
@@ -82,13 +83,13 @@ func createWebsocketStream(options *StartOptions, logger logger.Service) (*cfweb
 	req.Header = options.Headers
 
 	dump, err := httputil.DumpRequest(req, false)
-	logger.Debugf("Websocket request: %s", string(dump))
+	log.Debug().Msgf("Websocket request: %s", string(dump))
 
 	wsConn, resp, err := cfwebsocket.ClientConnect(req, nil)
 	defer closeRespBody(resp)
 
 	if err != nil && IsAccessResponse(resp) {
-		wsConn, err = createAccessAuthenticatedStream(options, logger)
+		wsConn, err = createAccessAuthenticatedStream(options, log)
 		if err != nil {
 			return nil, err
 		}
@@ -104,8 +105,8 @@ func createWebsocketStream(options *StartOptions, logger logger.Service) (*cfweb
 // this probably means the token in storage is invalid (expired/revoked). If that
 // happens it deletes the token and runs the connection again, so the user can
 // login again and generate a new one.
-func createAccessAuthenticatedStream(options *StartOptions, logger logger.Service) (*websocket.Conn, error) {
-	wsConn, resp, err := createAccessWebSocketStream(options, logger)
+func createAccessAuthenticatedStream(options *StartOptions, log *zerolog.Logger) (*websocket.Conn, error) {
+	wsConn, resp, err := createAccessWebSocketStream(options, log)
 	defer closeRespBody(resp)
 	if err == nil {
 		return wsConn, nil
@@ -123,7 +124,7 @@ func createAccessAuthenticatedStream(options *StartOptions, logger logger.Servic
 	if err := token.RemoveTokenIfExists(originReq.URL); err != nil {
 		return nil, err
 	}
-	wsConn, resp, err = createAccessWebSocketStream(options, logger)
+	wsConn, resp, err = createAccessWebSocketStream(options, log)
 	defer closeRespBody(resp)
 	if err != nil {
 		return nil, err
@@ -133,23 +134,23 @@ func createAccessAuthenticatedStream(options *StartOptions, logger logger.Servic
 }
 
 // createAccessWebSocketStream builds an Access request and makes a connection
-func createAccessWebSocketStream(options *StartOptions, logger logger.Service) (*websocket.Conn, *http.Response, error) {
-	req, err := BuildAccessRequest(options, logger)
+func createAccessWebSocketStream(options *StartOptions, log *zerolog.Logger) (*websocket.Conn, *http.Response, error) {
+	req, err := BuildAccessRequest(options, log)
 	if err != nil {
 		return nil, nil, err
 	}
 
 	dump, err := httputil.DumpRequest(req, false)
-	logger.Debugf("Access Websocket request: %s", string(dump))
+	log.Debug().Msgf("Access Websocket request: %s", string(dump))
 
 	conn, resp, err := cfwebsocket.ClientConnect(req, nil)
 
 	if resp != nil {
 		r, err := httputil.DumpResponse(resp, true)
 		if r != nil {
-			logger.Debugf("Websocket response: %q", r)
+			log.Debug().Msgf("Websocket response: %q", r)
 		} else if err != nil {
-			logger.Debugf("Websocket response error: %v", err)
+			log.Debug().Msgf("Websocket response error: %v", err)
 		}
 	}
 
@@ -5,17 +5,18 @@ import (
 	"strings"
 
 	"github.com/cloudflare/cloudflared/carrier"
-	"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
 	"github.com/cloudflare/cloudflared/h2mux"
 	"github.com/cloudflare/cloudflared/logger"
 	"github.com/cloudflare/cloudflared/validation"
 
 	"github.com/pkg/errors"
+	"github.com/rs/zerolog"
 	"github.com/urfave/cli/v2"
 )
 
 // StartForwarder starts a client side websocket forward
-func StartForwarder(forwarder config.Forwarder, shutdown <-chan struct{}, logger logger.Service) error {
+func StartForwarder(forwarder config.Forwarder, shutdown <-chan struct{}, log *zerolog.Logger) error {
 	validURL, err := validation.ValidateUrl(forwarder.Listener)
 	if err != nil {
 		return errors.Wrap(err, "error validating origin URL")
@@ -41,9 +42,9 @@ func StartForwarder(forwarder config.Forwarder, shutdown <-chan struct{}, logger
 	}
 
 	// we could add a cmd line variable for this bool if we want the SOCK5 server to be on the client side
-	wsConn := carrier.NewWSConnection(logger, false)
+	wsConn := carrier.NewWSConnection(log, false)
 
-	logger.Infof("Start Websocket listener on: %s", validURL.Host)
+	log.Info().Msgf("Start Websocket listener on: %s", validURL.Host)
 	return carrier.StartForwarder(wsConn, validURL.Host, shutdown, options)
 }
 
@@ -52,10 +53,7 @@ func StartForwarder(forwarder config.Forwarder, shutdown <-chan struct{}, logger
 // useful for proxying other protocols (like ssh) over websockets
 // (which you can put Access in front of)
 func ssh(c *cli.Context) error {
-	logger, err := logger.CreateSSHLoggerFromContext(c, logger.EnableTerminalLog)
-	if err != nil {
-		return cliutil.PrintLoggerSetupError("error setting up logger", err)
-	}
+	log := logger.CreateSSHLoggerFromContext(c, logger.EnableTerminalLog)
 
 	// get the hostname from the cmdline and error out if its not provided
 	rawHostName := c.String(sshHostnameFlag)
@@ -85,19 +83,19 @@ func ssh(c *cli.Context) error {
 	}
 
 	// we could add a cmd line variable for this bool if we want the SOCK5 server to be on the client side
-	wsConn := carrier.NewWSConnection(logger, false)
+	wsConn := carrier.NewWSConnection(log, false)
 
 	if c.NArg() > 0 || c.IsSet(sshURLFlag) {
 		forwarder, err := config.ValidateUrl(c, true)
 		if err != nil {
-			logger.Errorf("Error validating origin URL: %s", err)
+			log.Error().Msgf("Error validating origin URL: %s", err)
 			return errors.Wrap(err, "error validating origin URL")
 		}
 
-		logger.Infof("Start Websocket listener on: %s", forwarder.Host)
+		log.Info().Msgf("Start Websocket listener on: %s", forwarder.Host)
 		err = carrier.StartForwarder(wsConn, forwarder.Host, shutdownC, options)
 		if err != nil {
-			logger.Errorf("Error on Websocket listener: %s", err)
+			log.Error().Msgf("Error on Websocket listener: %s", err)
 		}
 		return err
 	}
@@ -17,11 +17,12 @@ import (
 	"github.com/cloudflare/cloudflared/logger"
 	"github.com/cloudflare/cloudflared/sshgen"
 	"github.com/cloudflare/cloudflared/validation"
-	"github.com/pkg/errors"
-	"golang.org/x/net/idna"
 
 	"github.com/getsentry/raven-go"
+	"github.com/pkg/errors"
+	"github.com/rs/zerolog"
 	"github.com/urfave/cli/v2"
+	"golang.org/x/net/idna"
 )
 
 const (
@@ -205,20 +206,17 @@ func login(c *cli.Context) error {
 		return err
 	}
 
-	logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
-	if err != nil {
-		return errors.Wrap(err, "error setting up logger")
-	}
+	log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
 
 	args := c.Args()
 	rawURL := ensureURLScheme(args.First())
 	appURL, err := url.Parse(rawURL)
 	if args.Len() < 1 || err != nil {
-		logger.Errorf("Please provide the url of the Access application\n")
+		log.Error().Msgf("Please provide the url of the Access application\n")
 		return err
 	}
-	if err := verifyTokenAtEdge(appURL, c, logger); err != nil {
-		logger.Errorf("Could not verify token: %s", err)
+	if err := verifyTokenAtEdge(appURL, c, log); err != nil {
+		log.Error().Msgf("Could not verify token: %s", err)
 		return err
 	}
 
@@ -250,19 +248,16 @@ func curl(c *cli.Context) error {
 	if err := raven.SetDSN(sentryDSN); err != nil {
 		return err
 	}
-	logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
-	if err != nil {
-		return errors.Wrap(err, "error setting up logger")
-	}
+	log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
 
 	args := c.Args()
 	if args.Len() < 1 {
-		logger.Error("Please provide the access app and command you wish to run.")
+		log.Error().Msg("Please provide the access app and command you wish to run.")
 		return errors.New("incorrect args")
 	}
 
 	cmdArgs, allowRequest := parseAllowRequest(args.Slice())
-	appURL, err := getAppURL(cmdArgs, logger)
+	appURL, err := getAppURL(cmdArgs, log)
 	if err != nil {
 		return err
 	}
@@ -270,12 +265,12 @@ func curl(c *cli.Context) error {
 	tok, err := token.GetAppTokenIfExists(appURL)
 	if err != nil || tok == "" {
 		if allowRequest {
-			logger.Info("You don't have an Access token set. Please run access token <access application> to fetch one.")
+			log.Info().Msg("You don't have an Access token set. Please run access token <access application> to fetch one.")
 			return shell.Run("curl", cmdArgs...)
 		}
-		tok, err = token.FetchToken(appURL, logger)
+		tok, err = token.FetchToken(appURL, log)
 		if err != nil {
-			logger.Errorf("Failed to refresh token: %s", err)
+			log.Error().Msgf("Failed to refresh token: %s", err)
 			return err
 		}
 	}
@@ -329,10 +324,7 @@ func sshConfig(c *cli.Context) error {
 
 // sshGen generates a short lived certificate for provided hostname
 func sshGen(c *cli.Context) error {
-	logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
-	if err != nil {
-		return errors.Wrap(err, "error setting up logger")
-	}
+	log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
 
 	// get the hostname from the cmdline and error out if its not provided
 	rawHostName := c.String(sshHostnameFlag)
@@ -349,7 +341,7 @@ func sshGen(c *cli.Context) error {
 	// this fetchToken function mutates the appURL param. We should refactor that
 	fetchTokenURL := &url.URL{}
 	*fetchTokenURL = *originURL
-	cfdToken, err := token.FetchTokenWithRedirect(fetchTokenURL, logger)
+	cfdToken, err := token.FetchTokenWithRedirect(fetchTokenURL, log)
 	if err != nil {
 		return err
 	}
@@ -362,15 +354,15 @@ func sshGen(c *cli.Context) error {
 }
 
 // getAppURL will pull the appURL needed for fetching a user's Access token
-func getAppURL(cmdArgs []string, logger logger.Service) (*url.URL, error) {
+func getAppURL(cmdArgs []string, log *zerolog.Logger) (*url.URL, error) {
 	if len(cmdArgs) < 1 {
-		logger.Error("Please provide a valid URL as the first argument to curl.")
+		log.Error().Msg("Please provide a valid URL as the first argument to curl.")
 		return nil, errors.New("not a valid url")
 	}
 
 	u, err := processURL(cmdArgs[0])
 	if err != nil {
-		logger.Error("Please provide a valid URL as the first argument to curl.")
+		log.Error().Msg("Please provide a valid URL as the first argument to curl.")
 		return nil, err
 	}
 
@@ -436,7 +428,7 @@ func isFileThere(candidate string) bool {
 // verifyTokenAtEdge checks for a token on disk, or generates a new one.
 // Then makes a request to to the origin with the token to ensure it is valid.
 // Returns nil if token is valid.
-func verifyTokenAtEdge(appUrl *url.URL, c *cli.Context, logger logger.Service) error {
+func verifyTokenAtEdge(appUrl *url.URL, c *cli.Context, log *zerolog.Logger) error {
 	headers := buildRequestHeaders(c.StringSlice(sshHeaderFlag))
 	if c.IsSet(sshTokenIDFlag) {
 		headers.Add(h2mux.CFAccessClientIDHeader, c.String(sshTokenIDFlag))
@@ -446,7 +438,7 @@ func verifyTokenAtEdge(appUrl *url.URL, c *cli.Context, logger logger.Service) e
 	}
 	options := &carrier.StartOptions{OriginURL: appUrl.String(), Headers: headers}
 
-	if valid, err := isTokenValid(options, logger); err != nil {
+	if valid, err := isTokenValid(options, log); err != nil {
 		return err
 	} else if valid {
 		return nil
@@ -456,7 +448,7 @@ func verifyTokenAtEdge(appUrl *url.URL, c *cli.Context, logger logger.Service) e
 		return err
 	}
 
-	if valid, err := isTokenValid(options, logger); err != nil {
+	if valid, err := isTokenValid(options, log); err != nil {
 		return err
 	} else if !valid {
 		return errors.New("failed to verify token")
@@ -466,8 +458,8 @@ func verifyTokenAtEdge(appUrl *url.URL, c *cli.Context, logger logger.Service) e
 }
 
 // isTokenValid makes a request to the origin and returns true if the response was not a 302.
-func isTokenValid(options *carrier.StartOptions, logger logger.Service) (bool, error) {
-	req, err := carrier.BuildAccessRequest(options, logger)
+func isTokenValid(options *carrier.StartOptions, log *zerolog.Logger) (bool, error) {
+	req, err := carrier.BuildAccessRequest(options, log)
 	if err != nil {
 		return false, errors.Wrap(err, "Could not create access request")
 	}
@@ -3,7 +3,8 @@ package main
 import (
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/access"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
-	"github.com/cloudflare/cloudflared/logger"
+
+	"github.com/rs/zerolog"
 )
 
 // ForwardServiceType is used to identify what kind of overwatch service this is
@@ -15,12 +16,12 @@ const ForwardServiceType = "forward"
 type ForwarderService struct {
 	forwarder config.Forwarder
 	shutdown  chan struct{}
-	logger    logger.Service
+	log       *zerolog.Logger
 }
 
 // NewForwardService creates a new forwarder service
-func NewForwardService(f config.Forwarder, logger logger.Service) *ForwarderService {
-	return &ForwarderService{forwarder: f, shutdown: make(chan struct{}, 1), logger: logger}
+func NewForwardService(f config.Forwarder, log *zerolog.Logger) *ForwarderService {
+	return &ForwarderService{forwarder: f, shutdown: make(chan struct{}, 1), log: log}
 }
 
 // Name is used to figure out this service is related to the others (normally the addr it binds to)
@@ -46,5 +47,5 @@ func (s *ForwarderService) Shutdown() {
 
 // Run is the run loop that is started by the overwatch service
 func (s *ForwarderService) Run() error {
-	return access.StartForwarder(s.forwarder, s.shutdown, s.logger)
+	return access.StartForwarder(s.forwarder, s.shutdown, s.log)
 }
@@ -2,8 +2,9 @@ package main
 
 import (
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/cloudflare/cloudflared/tunneldns"
+
+	"github.com/rs/zerolog"
 )
 
 // ResolverServiceType is used to identify what kind of overwatch service this is
@@ -15,14 +16,14 @@ const ResolverServiceType = "resolver"
 type ResolverService struct {
 	resolver config.DNSResolver
 	shutdown chan struct{}
-	logger   logger.Service
+	log      *zerolog.Logger
 }
 
 // NewResolverService creates a new resolver service
-func NewResolverService(r config.DNSResolver, logger logger.Service) *ResolverService {
+func NewResolverService(r config.DNSResolver, log *zerolog.Logger) *ResolverService {
 	return &ResolverService{resolver: r,
 		shutdown: make(chan struct{}),
-		logger:   logger,
+		log:      log,
 	}
 }
 
@@ -51,7 +52,7 @@ func (s *ResolverService) Shutdown() {
 func (s *ResolverService) Run() error {
 	// create a listener
 	l, err := tunneldns.CreateListener(s.resolver.AddressOrDefault(), s.resolver.PortOrDefault(),
-		s.resolver.UpstreamsOrDefault(), s.resolver.BootstrapsOrDefault(), s.logger)
+		s.resolver.UpstreamsOrDefault(), s.resolver.BootstrapsOrDefault(), s.log)
 	if err != nil {
 		return err
 	}
@@ -60,14 +61,14 @@ func (s *ResolverService) Run() error {
 	readySignal := make(chan struct{})
 	err = l.Start(readySignal)
 	if err != nil {
-		l.Stop()
+		_ = l.Stop()
 		return err
 	}
 	<-readySignal
-	s.logger.Infof("start resolver on: %s:%d", s.resolver.AddressOrDefault(), s.resolver.PortOrDefault())
+	s.log.Info().Msgf("start resolver on: %s:%d", s.resolver.AddressOrDefault(), s.resolver.PortOrDefault())
 
 	// wait for shutdown signal
 	<-s.shutdown
-	s.logger.Infof("shutdown on: %s:%d", s.resolver.AddressOrDefault(), s.resolver.PortOrDefault())
+	s.log.Info().Msgf("shutdown on: %s:%d", s.resolver.AddressOrDefault(), s.resolver.PortOrDefault())
 	return l.Stop()
 }
@@ -2,8 +2,9 @@ package main
 
 import (
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/cloudflare/cloudflared/overwatch"
+
+	"github.com/rs/zerolog"
 )
 
 // AppService is the main service that runs when no command lines flags are passed to cloudflared
@@ -13,17 +14,17 @@ type AppService struct {
 	serviceManager   overwatch.Manager
 	shutdownC        chan struct{}
 	configUpdateChan chan config.Root
-	logger           logger.Service
+	log              *zerolog.Logger
 }
 
 // NewAppService creates a new AppService with needed supporting services
-func NewAppService(configManager config.Manager, serviceManager overwatch.Manager, shutdownC chan struct{}, logger logger.Service) *AppService {
+func NewAppService(configManager config.Manager, serviceManager overwatch.Manager, shutdownC chan struct{}, log *zerolog.Logger) *AppService {
	return &AppService{
 		configManager:    configManager,
 		serviceManager:   serviceManager,
 		shutdownC:        shutdownC,
 		configUpdateChan: make(chan config.Root),
-		logger:           logger,
+		log:              log,
 	}
 }
 
@@ -67,14 +68,14 @@ func (s *AppService) handleConfigUpdate(c config.Root) {
 	// handle the client forward listeners
 	activeServices := map[string]struct{}{}
 	for _, f := range c.Forwarders {
-		service := NewForwardService(f, s.logger)
+		service := NewForwardService(f, s.log)
 		s.serviceManager.Add(service)
 		activeServices[service.Name()] = struct{}{}
 	}
 
 	// handle resolver changes
 	if c.Resolver.Enabled {
-		service := NewResolverService(c.Resolver, s.logger)
+		service := NewResolverService(c.Resolver, s.log)
 		s.serviceManager.Add(service)
 		activeServices[service.Name()] = struct{}{}
 
@@ -1,9 +1,8 @@
 package buildinfo
 
 import (
+	"github.com/rs/zerolog"
 	"runtime"
-
-	"github.com/cloudflare/cloudflared/logger"
 )
 
 type BuildInfo struct {
@@ -22,7 +21,7 @@ func GetBuildInfo(cloudflaredVersion string) *BuildInfo {
 	}
 }
 
-func (bi *BuildInfo) Log(logger logger.Service) {
-	logger.Infof("Version %s", bi.CloudflaredVersion)
-	logger.Infof("GOOS: %s, GOVersion: %s, GoArch: %s", bi.GoOS, bi.GoVersion, bi.GoArch)
+func (bi *BuildInfo) Log(log *zerolog.Logger) {
+	log.Info().Msgf("Version %s", bi.CloudflaredVersion)
+	log.Info().Msgf("GOOS: %s, GOVersion: %s, GoArch: %s", bi.GoOS, bi.GoVersion, bi.GoArch)
 }
@@ -2,10 +2,6 @@ package cliutil
 
 import (
 	"fmt"
-	"log"
-
-	"github.com/cloudflare/cloudflared/logger"
-	"github.com/pkg/errors"
 	"github.com/urfave/cli/v2"
 )
 
@@ -27,8 +23,6 @@ func UsageError(format string, args ...interface{}) error {
 // Ensures exit with error code if actionFunc returns an error
 func ErrorHandler(actionFunc cli.ActionFunc) cli.ActionFunc {
 	return func(ctx *cli.Context) error {
-		defer logger.SharedWriteManager.Shutdown()
-
 		err := actionFunc(ctx)
 		if err != nil {
 			if _, ok := err.(usageError); ok {
@@ -41,15 +35,3 @@ func ErrorHandler(actionFunc cli.ActionFunc) cli.ActionFunc {
 		return err
 	}
 }
-
-// PrintLoggerSetupError returns an error to stdout to notify when a logger can't start
-func PrintLoggerSetupError(msg string, err error) error {
-	l, le := logger.New()
-	if le != nil {
-		log.Printf("%s: %s", msg, err)
-	} else {
-		l.Errorf("%s: %s", msg, err)
-	}
-
-	return errors.Wrap(err, msg)
-}
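The deleted `PrintLoggerSetupError` helper existed because constructing the old logger could fail; a zerolog logger is just a value wrapping an `io.Writer`, so construction has no error path, which is why call sites such as `login`, `curl`, and `sshGen` drop their error handling. A sketch of building a console-style logger this way (the helper name and writer choice are assumptions, not taken from the diff):

```go
package example

import (
	"os"
	"time"

	"github.com/rs/zerolog"
)

// newTerminalLogger returns a logger writing human-readable lines to stderr.
// There is nothing to fail here: every step is plain value construction.
func newTerminalLogger() *zerolog.Logger {
	console := zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.RFC3339}
	log := zerolog.New(console).With().Timestamp().Logger()
	return &log
}
```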
@@ -9,13 +9,13 @@ import (
 	"runtime"
 	"time"
 
-	homedir "github.com/mitchellh/go-homedir"
+	"github.com/mitchellh/go-homedir"
 	"github.com/pkg/errors"
 	"github.com/urfave/cli/v2"
 	"gopkg.in/yaml.v2"
 
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/cloudflare/cloudflared/validation"
+	"github.com/rs/zerolog"
 )
 
 var (
@@ -95,7 +95,7 @@ func FileExists(path string) (bool, error) {
 		}
 		return false, err
 	}
-	f.Close()
+	_ = f.Close()
 	return true, nil
 }
 
@@ -138,7 +138,7 @@ func FindOrCreateConfigPath() string {
 		defer file.Close()
 
 		logDir := DefaultLogDirectory()
-		os.MkdirAll(logDir, os.ModePerm) //try and create it. Doesn't matter if it succeed or not, only byproduct will be no logs
+		_ = os.MkdirAll(logDir, os.ModePerm) //try and create it. Doesn't matter if it succeed or not, only byproduct will be no logs
 
 		c := Root{
 			LogDirectory: logDir,
@@ -345,7 +345,7 @@ func GetConfiguration() *Configuration {
 // ReadConfigFile returns InputSourceContext initialized from the configuration file.
 // On repeat calls returns with the same file, returns without reading the file again; however,
 // if value of "config" flag changes, will read the new config file
-func ReadConfigFile(c *cli.Context, log logger.Service) (*configFileSettings, error) {
+func ReadConfigFile(c *cli.Context, log *zerolog.Logger) (*configFileSettings, error) {
 	configFile := c.String("config")
 	if configuration.Source() == configFile || configFile == "" {
 		if configuration.Source() == "" {
@@ -354,7 +354,7 @@ func ReadConfigFile(c *cli.Context, log logger.Service) (*configFileSettings, er
 		return &configuration, nil
 	}
 
-	log.Debugf("Loading configuration from %s", configFile)
+	log.Debug().Msgf("Loading configuration from %s", configFile)
 	file, err := os.Open(configFile)
 	if err != nil {
 		if os.IsNotExist(err) {
@@ -365,7 +365,7 @@ func ReadConfigFile(c *cli.Context, log logger.Service) (*configFileSettings, er
 	defer file.Close()
 	if err := yaml.NewDecoder(file).Decode(&configuration); err != nil {
 		if err == io.EOF {
-			log.Errorf("Configuration file %s was empty", configFile)
+			log.Error().Msgf("Configuration file %s was empty", configFile)
 			return &configuration, nil
 		}
 		return nil, errors.Wrap(err, "error parsing YAML in config file at "+configFile)
@@ -4,9 +4,10 @@ import (
 	"io"
 	"os"
 
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/cloudflare/cloudflared/watcher"
+
 	"github.com/pkg/errors"
+	"github.com/rs/zerolog"
 	"gopkg.in/yaml.v2"
 )
 
@@ -27,16 +28,16 @@ type FileManager struct {
 	watcher    watcher.Notifier
 	notifier   Notifier
 	configPath string
-	logger     logger.Service
-	ReadConfig func(string, logger.Service) (Root, error)
+	log        *zerolog.Logger
+	ReadConfig func(string, *zerolog.Logger) (Root, error)
 }
 
 // NewFileManager creates a config manager
-func NewFileManager(watcher watcher.Notifier, configPath string, logger logger.Service) (*FileManager, error) {
+func NewFileManager(watcher watcher.Notifier, configPath string, log *zerolog.Logger) (*FileManager, error) {
 	m := &FileManager{
 		watcher:    watcher,
 		configPath: configPath,
-		logger:     logger,
+		log:        log,
 		ReadConfig: readConfigFromPath,
 	}
 	err := watcher.Add(configPath)
@@ -60,7 +61,7 @@ func (m *FileManager) Start(notifier Notifier) error {
 
 // GetConfig reads the yaml file from the disk
 func (m *FileManager) GetConfig() (Root, error) {
-	return m.ReadConfig(m.configPath, m.logger)
+	return m.ReadConfig(m.configPath, m.log)
 }
 
 // Shutdown stops the watcher
@@ -68,7 +69,7 @@ func (m *FileManager) Shutdown() {
 	m.watcher.Shutdown()
 }
 
-func readConfigFromPath(configPath string, log logger.Service) (Root, error) {
+func readConfigFromPath(configPath string, log *zerolog.Logger) (Root, error) {
 	if configPath == "" {
 		return Root{}, errors.New("unable to find config file")
 	}
@@ -82,7 +83,7 @@ func readConfigFromPath(configPath string, log logger.Service) (Root, error) {
 	var config Root
 	if err := yaml.NewDecoder(file).Decode(&config); err != nil {
 		if err == io.EOF {
-			log.Errorf("Configuration file %s was empty", configPath)
+			log.Error().Msgf("Configuration file %s was empty", configPath)
 			return Root{}, nil
 		}
 		return Root{}, errors.Wrap(err, "error parsing YAML in config file at "+configPath)
@@ -98,14 +99,14 @@ func readConfigFromPath(configPath string, log logger.Service) (Root, error) {
 func (m *FileManager) WatcherItemDidChange(filepath string) {
 	config, err := m.GetConfig()
 	if err != nil {
-		m.logger.Errorf("Failed to read new config: %s", err)
+		m.log.Error().Msgf("Failed to read new config: %s", err)
 		return
 	}
-	m.logger.Info("Config file has been updated")
+	m.log.Info().Msg("Config file has been updated")
 	m.notifier.ConfigDidUpdate(config)
 }
 
 // WatcherDidError notifies of errors with the file watcher
 func (m *FileManager) WatcherDidError(err error) {
-	m.logger.Errorf("Config watcher encountered an error: %s", err)
+	m.log.Error().Msgf("Config watcher encountered an error: %s", err)
 }
@@ -4,10 +4,10 @@ import (
 	"os"
 	"testing"
 
-	"github.com/stretchr/testify/assert"
-
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/cloudflare/cloudflared/watcher"
+
+	"github.com/rs/zerolog"
+	"github.com/stretchr/testify/assert"
 )
 
 type mockNotifier struct {
@@ -46,8 +46,8 @@ func TestConfigChanged(t *testing.T) {
 	f, err := os.Create(filePath)
 	assert.NoError(t, err)
 	defer func() {
-		f.Close()
-		os.Remove(filePath)
+		_ = f.Close()
+		_ = os.Remove(filePath)
 	}()
 	c := &Root{
 		Forwarders: []Forwarder{
@@ -57,15 +57,15 @@ func TestConfigChanged(t *testing.T) {
 			},
 		},
 	}
-	configRead := func(configPath string, log logger.Service) (Root, error) {
+	configRead := func(configPath string, log *zerolog.Logger) (Root, error) {
 		return *c, nil
 	}
 	wait := make(chan struct{})
 	w := &mockFileWatcher{path: filePath, ready: wait}
 
-	logger := logger.NewOutputWriter(logger.NewMockWriteManager())
+	log := zerolog.Nop()
 
-	service, err := NewFileManager(w, filePath, logger)
+	service, err := NewFileManager(w, filePath, &log)
 	service.ReadConfig = configRead
 	assert.NoError(t, err)
 
@@ -7,13 +7,13 @@ import (
 	"os"
 	"path/filepath"
 
-	"github.com/pkg/errors"
-	cli "github.com/urfave/cli/v2"
-
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/tunnel"
 	"github.com/cloudflare/cloudflared/logger"
+
+	"github.com/rs/zerolog"
+	"github.com/urfave/cli/v2"
 )
 
 func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) {
@@ -21,7 +21,7 @@ func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) {
 		Name:  "service",
 		Usage: "Manages the Argo Tunnel system service",
 		Subcommands: []*cli.Command{
-			&cli.Command{
+			{
 				Name:   "install",
 				Usage:  "Install Argo Tunnel as a system service",
 				Action: cliutil.ErrorHandler(installLinuxService),
@@ -32,7 +32,7 @@ func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) {
 					},
 				},
 			},
-			&cli.Command{
+			{
 				Name:   "uninstall",
 				Usage:  "Uninstall the Argo Tunnel service",
 				Action: cliutil.ErrorHandler(uninstallLinuxService),
@@ -190,7 +190,7 @@ func isSystemd() bool {
 	return false
 }
 
-func copyUserConfiguration(userConfigDir, userConfigFile, userCredentialFile string, logger logger.Service) error {
+func copyUserConfiguration(userConfigDir, userConfigFile, userCredentialFile string, log *zerolog.Logger) error {
 	srcCredentialPath := filepath.Join(userConfigDir, userCredentialFile)
 	destCredentialPath := filepath.Join(serviceConfigDir, serviceCredentialFile)
 	if srcCredentialPath != destCredentialPath {
@@ -205,17 +205,14 @@ func copyUserConfiguration(userConfigDir, userConfigFile, userCredentialFile str
 		if err := copyConfig(srcConfigPath, destConfigPath); err != nil {
 			return err
 		}
-		logger.Infof("Copied %s to %s", srcConfigPath, destConfigPath)
+		log.Info().Msgf("Copied %s to %s", srcConfigPath, destConfigPath)
 	}
 
 	return nil
 }
 
 func installLinuxService(c *cli.Context) error {
-	logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
-	if err != nil {
-		return errors.Wrap(err, "error setting up logger")
-	}
+	log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
 
 	etPath, err := os.Executable()
 	if err != nil {
@@ -232,8 +229,8 @@ func installLinuxService(c *cli.Context) error {
 		userConfigDir := filepath.Dir(c.String("config"))
 		userConfigFile := filepath.Base(c.String("config"))
 		userCredentialFile := config.DefaultCredentialFile
-		if err = copyUserConfiguration(userConfigDir, userConfigFile, userCredentialFile, logger); err != nil {
-			logger.Errorf("Failed to copy user configuration: %s. Before running the service, ensure that %s contains two files, %s and %s", err,
+		if err = copyUserConfiguration(userConfigDir, userConfigFile, userCredentialFile, log); err != nil {
+			log.Error().Msgf("Failed to copy user configuration: %s. Before running the service, ensure that %s contains two files, %s and %s", err,
 				serviceConfigDir, serviceCredentialFile, serviceConfigFile)
 			return err
 		}
@@ -241,7 +238,7 @@ func installLinuxService(c *cli.Context) error {
 			"--origincert", serviceConfigDir + "/" + serviceCredentialFile,
 		}
 	} else {
-		src, err := config.ReadConfigFile(c, logger)
+		src, err := config.ReadConfigFile(c, log)
 		if err != nil {
 			return err
 		}
@@ -274,42 +271,42 @@ credentials-file: CREDENTIALS-FILE
 
 	switch {
 	case isSystemd():
-		logger.Infof("Using Systemd")
-		return installSystemd(&templateArgs, logger)
+		log.Info().Msgf("Using Systemd")
+		return installSystemd(&templateArgs, log)
 	default:
-		logger.Infof("Using SysV")
-		return installSysv(&templateArgs, logger)
+		log.Info().Msgf("Using SysV")
+		return installSysv(&templateArgs, log)
 	}
 }
 
-func installSystemd(templateArgs *ServiceTemplateArgs, logger logger.Service) error {
+func installSystemd(templateArgs *ServiceTemplateArgs, log *zerolog.Logger) error {
 	for _, serviceTemplate := range systemdTemplates {
 		err := serviceTemplate.Generate(templateArgs)
 		if err != nil {
-			logger.Errorf("error generating service template: %s", err)
+			log.Error().Msgf("error generating service template: %s", err)
 			return err
 		}
 	}
 	if err := runCommand("systemctl", "enable", "cloudflared.service"); err != nil {
-		logger.Errorf("systemctl enable cloudflared.service error: %s", err)
+		log.Error().Msgf("systemctl enable cloudflared.service error: %s", err)
 		return err
 	}
 	if err := runCommand("systemctl", "start", "cloudflared-update.timer"); err != nil {
-		logger.Errorf("systemctl start cloudflared-update.timer error: %s", err)
+		log.Error().Msgf("systemctl start cloudflared-update.timer error: %s", err)
 		return err
 	}
-	logger.Infof("systemctl daemon-reload")
+	log.Info().Msgf("systemctl daemon-reload")
 	return runCommand("systemctl", "daemon-reload")
 }
 
-func installSysv(templateArgs *ServiceTemplateArgs, logger logger.Service) error {
+func installSysv(templateArgs *ServiceTemplateArgs, log *zerolog.Logger) error {
 	confPath, err := sysvTemplate.ResolvePath()
 	if err != nil {
-		logger.Errorf("error resolving system path: %s", err)
+		log.Error().Msgf("error resolving system path: %s", err)
 		return err
 	}
 	if err := sysvTemplate.Generate(templateArgs); err != nil {
-		logger.Errorf("error generating system template: %s", err)
+		log.Error().Msgf("error generating system template: %s", err)
 		return err
 	}
 	for _, i := range [...]string{"2", "3", "4", "5"} {
@@ -326,43 +323,40 @@ func installSysv(templateArgs *ServiceTemplateArgs, logger logger.Service) error
 }
 
 func uninstallLinuxService(c *cli.Context) error {
-	logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
-	if err != nil {
-		return errors.Wrap(err, "error setting up logger")
-	}
+	log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
 
 	switch {
 	case isSystemd():
-		logger.Infof("Using Systemd")
-		return uninstallSystemd(logger)
+		log.Info().Msgf("Using Systemd")
+		return uninstallSystemd(log)
 	default:
-		logger.Infof("Using SysV")
-		return uninstallSysv(logger)
+		log.Info().Msgf("Using SysV")
+		return uninstallSysv(log)
 	}
 }
 
-func uninstallSystemd(logger logger.Service) error {
+func uninstallSystemd(log *zerolog.Logger) error {
 	if err := runCommand("systemctl", "disable", "cloudflared.service"); err != nil {
-		logger.Errorf("systemctl disable cloudflared.service error: %s", err)
+		log.Error().Msgf("systemctl disable cloudflared.service error: %s", err)
 		return err
 	}
 	if err := runCommand("systemctl", "stop", "cloudflared-update.timer"); err != nil {
-		logger.Errorf("systemctl stop cloudflared-update.timer error: %s", err)
+		log.Error().Msgf("systemctl stop cloudflared-update.timer error: %s", err)
 		return err
 	}
 	for _, serviceTemplate := range systemdTemplates {
 		if err := serviceTemplate.Remove(); err != nil {
|
if err := serviceTemplate.Remove(); err != nil {
|
||||||
logger.Errorf("error removing service template: %s", err)
|
log.Error().Msgf("error removing service template: %s", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
logger.Infof("Successfully uninstall cloudflared service")
|
log.Info().Msgf("Successfully uninstall cloudflared service")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func uninstallSysv(logger logger.Service) error {
|
func uninstallSysv(log *zerolog.Logger) error {
|
||||||
if err := sysvTemplate.Remove(); err != nil {
|
if err := sysvTemplate.Remove(); err != nil {
|
||||||
logger.Errorf("error removing service template: %s", err)
|
log.Error().Msgf("error removing service template: %s", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
for _, i := range [...]string{"2", "3", "4", "5"} {
|
for _, i := range [...]string{"2", "3", "4", "5"} {
|
||||||
|
@ -375,6 +369,6 @@ func uninstallSysv(logger logger.Service) error {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
logger.Infof("Successfully uninstall cloudflared service")
|
log.Info().Msgf("Successfully uninstall cloudflared service")
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
|
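The translation applied throughout this commit is mechanical: calls on the in-house `logger.Service` (`Infof`, `Errorf`, `Debugf`) become zerolog's chained event calls (`Info().Msgf`, `Error().Msgf`, `Debug().Msgf`). The following is a minimal standalone sketch of the new call style, not part of the commit itself, assuming only the public zerolog API:

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// Build a zerolog.Logger writing to stderr with timestamps; functions in the
	// diff above receive a *zerolog.Logger configured along these lines.
	log := zerolog.New(os.Stderr).With().Timestamp().Logger()

	// Old style: logger.Infof("Using Systemd") / logger.Errorf("... error: %s", err)
	// New style: pick a level, then emit the formatted message.
	log.Info().Msgf("Using %s", "Systemd")
	log.Error().Msgf("systemctl enable cloudflared.service error: %s", os.ErrPermission)
}
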
@@ -34,7 +34,7 @@ func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) {
 			},
 		},
 	})
-	app.Run(os.Args)
+	_ = app.Run(os.Args)
 }
 
 func newLaunchdTemplate(installPath, stdoutPath, stderrPath string) *ServiceTemplate {
@@ -107,71 +107,61 @@ func stderrPath() (string, error) {
 }
 
 func installLaunchd(c *cli.Context) error {
-	logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
-	if err != nil {
-		return errors.Wrap(err, "error setting up logger")
-	}
+	log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
 
 	if isRootUser() {
-		logger.Infof("Installing Argo Tunnel client as a system launch daemon. " +
+		log.Info().Msgf("Installing Argo Tunnel client as a system launch daemon. " +
 			"Argo Tunnel client will run at boot")
 	} else {
-		logger.Infof("Installing Argo Tunnel client as an user launch agent. " +
+		log.Info().Msgf("Installing Argo Tunnel client as an user launch agent. " +
 			"Note that Argo Tunnel client will only run when the user is logged in. " +
 			"If you want to run Argo Tunnel client at boot, install with root permission. " +
 			"For more information, visit https://developers.cloudflare.com/argo-tunnel/reference/service/")
 	}
 	etPath, err := os.Executable()
 	if err != nil {
-		logger.Errorf("Error determining executable path: %s", err)
+		log.Error().Msgf("Error determining executable path: %s", err)
 		return fmt.Errorf("Error determining executable path: %v", err)
 	}
 	installPath, err := installPath()
 	if err != nil {
-		logger.Errorf("Error determining install path: %s", err)
+		log.Error().Msgf("Error determining install path: %s", err)
 		return errors.Wrap(err, "Error determining install path")
 	}
 	stdoutPath, err := stdoutPath()
 	if err != nil {
-		logger.Errorf("error determining stdout path: %s", err)
+		log.Error().Msgf("error determining stdout path: %s", err)
 		return errors.Wrap(err, "error determining stdout path")
 	}
 	stderrPath, err := stderrPath()
 	if err != nil {
-		logger.Errorf("error determining stderr path: %s", err)
+		log.Error().Msgf("error determining stderr path: %s", err)
 		return errors.Wrap(err, "error determining stderr path")
 	}
 	launchdTemplate := newLaunchdTemplate(installPath, stdoutPath, stderrPath)
-	if err != nil {
-		logger.Errorf("error creating launchd template: %s", err)
-		return errors.Wrap(err, "error creating launchd template")
-	}
 	templateArgs := ServiceTemplateArgs{Path: etPath}
 	err = launchdTemplate.Generate(&templateArgs)
 	if err != nil {
-		logger.Errorf("error generating launchd template: %s", err)
+		log.Error().Msgf("error generating launchd template: %s", err)
 		return err
 	}
 	plistPath, err := launchdTemplate.ResolvePath()
 	if err != nil {
-		logger.Errorf("error resolving launchd template path: %s", err)
+		log.Error().Msgf("error resolving launchd template path: %s", err)
 		return err
 	}
 
-	logger.Infof("Outputs are logged to %s and %s", stderrPath, stdoutPath)
+	log.Info().Msgf("Outputs are logged to %s and %s", stderrPath, stdoutPath)
 	return runCommand("launchctl", "load", plistPath)
 }
 
 func uninstallLaunchd(c *cli.Context) error {
-	logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
-	if err != nil {
-		return errors.Wrap(err, "error setting up logger")
-	}
+	log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
 
 	if isRootUser() {
-		logger.Infof("Uninstalling Argo Tunnel as a system launch daemon")
+		log.Info().Msgf("Uninstalling Argo Tunnel as a system launch daemon")
 	} else {
-		logger.Infof("Uninstalling Argo Tunnel as an user launch agent")
+		log.Info().Msgf("Uninstalling Argo Tunnel as an user launch agent")
 	}
 	installPath, err := installPath()
 	if err != nil {
@@ -186,20 +176,17 @@ func uninstallLaunchd(c *cli.Context) error {
 		return errors.Wrap(err, "error determining stderr path")
 	}
 	launchdTemplate := newLaunchdTemplate(installPath, stdoutPath, stderrPath)
-	if err != nil {
-		return errors.Wrap(err, "error creating launchd template")
-	}
 	plistPath, err := launchdTemplate.ResolvePath()
 	if err != nil {
-		logger.Errorf("error resolving launchd template path: %s", err)
+		log.Error().Msgf("error resolving launchd template path: %s", err)
 		return err
 	}
 	err = runCommand("launchctl", "unload", plistPath)
 	if err != nil {
-		logger.Errorf("error unloading: %s", err)
+		log.Error().Msgf("error unloading: %s", err)
 		return err
 	}
 
-	logger.Infof("Outputs are logged to %s and %s", stderrPath, stdoutPath)
+	log.Info().Msgf("Outputs are logged to %s and %s", stderrPath, stdoutPath)
 	return launchdTemplate.Remove()
 }

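Alongside the call-style change, helpers that previously accepted the `logger.Service` interface now take a `*zerolog.Logger`, and the error-handling blocks around logger construction disappear because, as used in this diff, the new `CreateLoggerFromContext` returns a logger without an error. Below is a hedged sketch of that helper shape, outside the commit; `runCommandSketch` is a hypothetical stand-in, not the repo's real `runCommand`:

package main

import (
	"os"
	"os/exec"

	"github.com/rs/zerolog"
)

// runCommandSketch is a hypothetical stand-in for the repo's command helper.
func runCommandSketch(name string, args ...string) error {
	return exec.Command(name, args...).Run()
}

// installSketch mirrors the post-commit signature: the logger is passed by
// pointer so every caller shares one configured zerolog.Logger.
func installSketch(log *zerolog.Logger) error {
	if err := runCommandSketch("systemctl", "daemon-reload"); err != nil {
		log.Error().Msgf("systemctl daemon-reload error: %s", err)
		return err
	}
	log.Info().Msg("systemctl daemon-reload")
	return nil
}

func main() {
	log := zerolog.New(os.Stderr)
	_ = installSketch(&log)
}
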
@@ -10,16 +10,16 @@ import (
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/tunnel"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/updater"
-	log "github.com/cloudflare/cloudflared/logger"
+	"github.com/cloudflare/cloudflared/logger"
 	"github.com/cloudflare/cloudflared/metrics"
 	"github.com/cloudflare/cloudflared/overwatch"
 	"github.com/cloudflare/cloudflared/tunneldns"
 	"github.com/cloudflare/cloudflared/watcher"
 
 	"github.com/getsentry/raven-go"
 	"github.com/mitchellh/go-homedir"
-	"github.com/urfave/cli/v2"
 
 	"github.com/pkg/errors"
+	"github.com/urfave/cli/v2"
 )
 
 const (
@@ -184,38 +184,33 @@ func captureError(err error) {
 
 // cloudflared was started without any flags
 func handleServiceMode(c *cli.Context, shutdownC chan struct{}) error {
-	defer log.SharedWriteManager.Shutdown()
-
-	logger, err := log.CreateLoggerFromContext(c, log.DisableTerminalLog)
-	if err != nil {
-		return cliutil.PrintLoggerSetupError("error setting up logger", err)
-	}
+	log := logger.CreateLoggerFromContext(c, logger.DisableTerminalLog)
 
 	// start the main run loop that reads from the config file
 	f, err := watcher.NewFile()
 	if err != nil {
-		logger.Errorf("Cannot load config file: %s", err)
+		log.Error().Msgf("Cannot load config file: %s", err)
 		return err
 	}
 
 	configPath := config.FindOrCreateConfigPath()
-	configManager, err := config.NewFileManager(f, configPath, logger)
+	configManager, err := config.NewFileManager(f, configPath, log)
 	if err != nil {
-		logger.Errorf("Cannot setup config file for monitoring: %s", err)
+		log.Error().Msgf("Cannot setup config file for monitoring: %s", err)
 		return err
 	}
-	logger.Infof("monitoring config file at: %s", configPath)
+	log.Info().Msgf("monitoring config file at: %s", configPath)
 
 	serviceCallback := func(t string, name string, err error) {
 		if err != nil {
-			logger.Errorf("%s service: %s encountered an error: %s", t, name, err)
+			log.Error().Msgf("%s service: %s encountered an error: %s", t, name, err)
 		}
 	}
 	serviceManager := overwatch.NewAppManager(serviceCallback)
 
-	appService := NewAppService(configManager, serviceManager, shutdownC, logger)
+	appService := NewAppService(configManager, serviceManager, shutdownC, log)
 	if err := appService.Run(); err != nil {
-		logger.Errorf("Failed to start app service: %s", err)
+		log.Error().Msgf("Failed to start app service: %s", err)
 		return err
 	}
 	return nil

@@ -16,10 +16,11 @@ import (
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/path"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/transfer"
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/cloudflare/cloudflared/origin"
 
 	"github.com/coreos/go-oidc/jose"
 	"github.com/pkg/errors"
+	"github.com/rs/zerolog"
 )
 
 const (
@@ -97,7 +98,7 @@ func newLock(path string) *lock {
 func (l *lock) Acquire() error {
 	// Intercept SIGINT and SIGTERM to release lock before exiting
 	l.sigHandler.register(func() {
-		l.deleteLockFile()
+		_ = l.deleteLockFile()
 		os.Exit(0)
 	})
 
@@ -143,18 +144,18 @@ func isTokenLocked(lockFilePath string) bool {
 
 // FetchTokenWithRedirect will either load a stored token or generate a new one
 // it appends the full url as the redirect URL to the access cli request if opening the browser
-func FetchTokenWithRedirect(appURL *url.URL, logger logger.Service) (string, error) {
-	return getToken(appURL, false, logger)
+func FetchTokenWithRedirect(appURL *url.URL, log *zerolog.Logger) (string, error) {
+	return getToken(appURL, false, log)
 }
 
 // FetchToken will either load a stored token or generate a new one
 // it appends the host of the appURL as the redirect URL to the access cli request if opening the browser
-func FetchToken(appURL *url.URL, logger logger.Service) (string, error) {
-	return getToken(appURL, true, logger)
+func FetchToken(appURL *url.URL, log *zerolog.Logger) (string, error) {
+	return getToken(appURL, true, log)
 }
 
 // getToken will either load a stored token or generate a new one
-func getToken(appURL *url.URL, useHostOnly bool, logger logger.Service) (string, error) {
+func getToken(appURL *url.URL, useHostOnly bool, log *zerolog.Logger) (string, error) {
 	if token, err := GetAppTokenIfExists(appURL); token != "" && err == nil {
 		return token, nil
 	}
@@ -179,7 +180,7 @@ func getToken(appURL *url.URL, useHostOnly bool, logger logger.Service) (string,
 	var orgTokenPath string
 	// Get auth domain to format into org token file path
 	if authDomain, err := getAuthDomain(appURL); err != nil {
-		logger.Errorf("failed to get auth domain: %s", err)
+		log.Error().Msgf("failed to get auth domain: %s", err)
 	} else {
 		orgToken, err := GetOrgTokenIfExists(authDomain)
 		if err != nil {
@@ -198,7 +199,7 @@ func getToken(appURL *url.URL, useHostOnly bool, logger logger.Service) (string,
 		}
 		if err == nil {
 			if appToken, err := exchangeOrgToken(appURL, orgToken); err != nil {
-				logger.Debugf("failed to exchange org token for app token: %s", err)
+				log.Debug().Msgf("failed to exchange org token for app token: %s", err)
 			} else {
 				if err := ioutil.WriteFile(appTokenPath, []byte(appToken), 0600); err != nil {
 					return "", errors.Wrap(err, "failed to write app token to disk")
@@ -207,19 +208,19 @@ func getToken(appURL *url.URL, useHostOnly bool, logger logger.Service) (string,
 			}
 		}
 	}
-	return getTokensFromEdge(appURL, appTokenPath, orgTokenPath, useHostOnly, logger)
+	return getTokensFromEdge(appURL, appTokenPath, orgTokenPath, useHostOnly, log)
 
 }
 
 // getTokensFromEdge will attempt to use the transfer service to retrieve an app and org token, save them to disk,
 // and return the app token.
-func getTokensFromEdge(appURL *url.URL, appTokenPath, orgTokenPath string, useHostOnly bool, logger logger.Service) (string, error) {
+func getTokensFromEdge(appURL *url.URL, appTokenPath, orgTokenPath string, useHostOnly bool, log *zerolog.Logger) (string, error) {
 	// If no org token exists or if it couldnt be exchanged for an app token, then run the transfer service flow.
 
 	// this weird parameter is the resource name (token) and the key/value
 	// we want to send to the transfer service. the key is token and the value
 	// is blank (basically just the id generated in the transfer service)
-	resourceData, err := transfer.Run(appURL, keyName, keyName, "", true, useHostOnly, logger)
+	resourceData, err := transfer.Run(appURL, keyName, keyName, "", true, useHostOnly, log)
 	if err != nil {
 		return "", errors.Wrap(err, "failed to run transfer service")
 	}

@@ -12,8 +12,8 @@ import (
 
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/encrypter"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/shell"
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/pkg/errors"
+	"github.com/rs/zerolog"
 )
 
 const (
@@ -27,7 +27,7 @@ const (
 // The "dance" we refer to is building a HTTP request, opening that in a browser waiting for
 // the user to complete an action, while it long polls in the background waiting for an
 // action to be completed to download the resource.
-func Run(transferURL *url.URL, resourceName, key, value string, shouldEncrypt bool, useHostOnly bool, logger logger.Service) ([]byte, error) {
+func Run(transferURL *url.URL, resourceName, key, value string, shouldEncrypt bool, useHostOnly bool, log *zerolog.Logger) ([]byte, error) {
 	encrypterClient, err := encrypter.New("cloudflared_priv.pem", "cloudflared_pub.pem")
 	if err != nil {
 		return nil, err
@@ -48,7 +48,7 @@ func Run(transferURL *url.URL, resourceName, key, value string, shouldEncrypt bo
 	var resourceData []byte
 
 	if shouldEncrypt {
-		buf, key, err := transferRequest(baseStoreURL+"transfer/"+encrypterClient.PublicKey(), logger)
+		buf, key, err := transferRequest(baseStoreURL+"transfer/"+encrypterClient.PublicKey(), log)
 		if err != nil {
 			return nil, err
 		}
@@ -64,7 +64,7 @@ func Run(transferURL *url.URL, resourceName, key, value string, shouldEncrypt bo
 
 		resourceData = decrypted
 	} else {
-		buf, _, err := transferRequest(baseStoreURL+encrypterClient.PublicKey(), logger)
+		buf, _, err := transferRequest(baseStoreURL+encrypterClient.PublicKey(), log)
 		if err != nil {
 			return nil, err
 		}
@@ -96,17 +96,17 @@ func buildRequestURL(baseURL *url.URL, key, value string, cli, useHostOnly bool)
 }
 
 // transferRequest downloads the requested resource from the request URL
-func transferRequest(requestURL string, logger logger.Service) ([]byte, string, error) {
+func transferRequest(requestURL string, log *zerolog.Logger) ([]byte, string, error) {
 	client := &http.Client{Timeout: clientTimeout}
 	const pollAttempts = 10
 	// we do "long polling" on the endpoint to get the resource.
 	for i := 0; i < pollAttempts; i++ {
-		buf, key, err := poll(client, requestURL, logger)
+		buf, key, err := poll(client, requestURL, log)
 		if err != nil {
 			return nil, "", err
 		} else if len(buf) > 0 {
 			if err := putSuccess(client, requestURL); err != nil {
-				logger.Errorf("Failed to update resource success: %s", err)
+				log.Error().Msgf("Failed to update resource success: %s", err)
 			}
 			return buf, key, nil
 		}
@@ -115,7 +115,7 @@ func transferRequest(requestURL string, logger logger.Service) ([]byte, string,
 }
 
 // poll the endpoint for the request resource, waiting for the user interaction
-func poll(client *http.Client, requestURL string, logger logger.Service) ([]byte, string, error) {
+func poll(client *http.Client, requestURL string, log *zerolog.Logger) ([]byte, string, error) {
 	resp, err := client.Get(requestURL)
 	if err != nil {
 		return nil, "", err
@@ -128,7 +128,7 @@ func poll(client *http.Client, requestURL string, logger logger.Service) ([]byte
 		return nil, "", fmt.Errorf("error on request %d", resp.StatusCode)
 	}
 	if resp.StatusCode != 200 {
-		logger.Info("Waiting for login...")
+		log.Info().Msg("Waiting for login...")
 		return nil, "", nil
 	}
 

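One detail visible in the poll changes above: unformatted calls move to `Msg` while formatted ones use `Msgf`, since with zerolog the level method returns an event that is only emitted once `Msg`/`Msgf` is called. A small standalone sketch of that distinction follows; the messages and the level filter are illustrative, not taken from the commit:

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// Illustrative logger with a minimum level of Info.
	log := zerolog.New(os.Stderr).Level(zerolog.InfoLevel)

	// Plain message: no formatting verbs, so Msg is enough.
	log.Info().Msg("Waiting for login...")

	// Formatted message: Msgf behaves like fmt.Printf.
	attempts := 10
	log.Debug().Msgf("polling up to %d times", attempts) // filtered out at InfoLevel

	// Structured fields can be attached before the message is emitted.
	log.Error().Str("url", "https://example.invalid").Msg("failed to update resource success")
}
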
@@ -35,6 +35,7 @@ import (
 	"github.com/google/uuid"
 	"github.com/mitchellh/go-homedir"
 	"github.com/pkg/errors"
+	"github.com/rs/zerolog"
 	"github.com/urfave/cli/v2"
 	"github.com/urfave/cli/v2/altsrc"
 )
@@ -42,8 +43,6 @@ import (
 const (
 	sentryDSN = "https://56a9c9fa5c364ab28f34b14f35ea0f1b:3e8827f6f9f740738eb11138f7bebb68@sentry.io/189878"
 
-	sshLogFileDirectory = "/usr/local/var/log/cloudflared/"
-
 	// sshPortFlag is the port on localhost the cloudflared ssh server will run on
 	sshPortFlag = "local-ssh-port"
 
@@ -174,14 +173,14 @@ func runAdhocNamedTunnel(sc *subcommandContext, name string) error {
 			return errors.Wrap(err, "failed to create tunnel")
 		}
 	} else {
-		sc.logger.Infof("Tunnel already created with ID %s", tunnel.ID)
+		sc.log.Info().Msgf("Tunnel already created with ID %s", tunnel.ID)
 	}
 
 	if r, ok := routeFromFlag(sc.c); ok {
 		if res, err := sc.route(tunnel.ID, r); err != nil {
-			sc.logger.Errorf("failed to create route, please create it manually. err: %v.", err)
+			sc.log.Error().Msgf("failed to create route, please create it manually. err: %v.", err)
 		} else {
-			sc.logger.Infof(res.SuccessSummary())
+			sc.log.Info().Msgf(res.SuccessSummary())
 		}
 	}
 
@@ -194,7 +193,7 @@ func runAdhocNamedTunnel(sc *subcommandContext, name string) error {
 
 // runClassicTunnel creates a "classic" non-named tunnel
 func runClassicTunnel(sc *subcommandContext) error {
-	return StartServer(sc.c, version, shutdownC, graceShutdownC, nil, sc.logger, sc.isUIEnabled)
+	return StartServer(sc.c, version, shutdownC, graceShutdownC, nil, sc.log, sc.isUIEnabled)
 }
 
 func routeFromFlag(c *cli.Context) (tunnelstore.Route, bool) {
@@ -213,7 +212,7 @@ func StartServer(
 	shutdownC,
 	graceShutdownC chan struct{},
 	namedTunnel *connection.NamedTunnelConfig,
-	generalLogger logger.Service,
+	log *zerolog.Logger,
 	isUIEnabled bool,
 ) error {
 	_ = raven.SetDSN(sentryDSN)
@@ -224,45 +223,45 @@ func StartServer(
 	dnsReadySignal := make(chan struct{})
 
 	if config.GetConfiguration().Source() == "" {
-		generalLogger.Infof(config.ErrNoConfigFile.Error())
+		log.Info().Msg(config.ErrNoConfigFile.Error())
 	}
 
 	if c.IsSet("trace-output") {
 		tmpTraceFile, err := ioutil.TempFile("", "trace")
 		if err != nil {
-			generalLogger.Errorf("Failed to create new temporary file to save trace output: %s", err)
+			log.Error().Msgf("Failed to create new temporary file to save trace output: %s", err)
 		}
 
 		defer func() {
 			if err := tmpTraceFile.Close(); err != nil {
-				generalLogger.Errorf("Failed to close trace output file %s with error: %s", tmpTraceFile.Name(), err)
+				log.Error().Msgf("Failed to close trace output file %s with error: %s", tmpTraceFile.Name(), err)
 			}
 			if err := os.Rename(tmpTraceFile.Name(), c.String("trace-output")); err != nil {
-				generalLogger.Errorf("Failed to rename temporary trace output file %s to %s with error: %s", tmpTraceFile.Name(), c.String("trace-output"), err)
+				log.Error().Msgf("Failed to rename temporary trace output file %s to %s with error: %s", tmpTraceFile.Name(), c.String("trace-output"), err)
 			} else {
 				err := os.Remove(tmpTraceFile.Name())
 				if err != nil {
-					generalLogger.Errorf("Failed to remove the temporary trace file %s with error: %s", tmpTraceFile.Name(), err)
+					log.Error().Msgf("Failed to remove the temporary trace file %s with error: %s", tmpTraceFile.Name(), err)
 				}
 			}
 		}()
 
 		if err := trace.Start(tmpTraceFile); err != nil {
-			generalLogger.Errorf("Failed to start trace: %s", err)
+			log.Error().Msgf("Failed to start trace: %s", err)
 			return errors.Wrap(err, "Error starting tracing")
 		}
 		defer trace.Stop()
 	}
 
 	buildInfo := buildinfo.GetBuildInfo(version)
-	buildInfo.Log(generalLogger)
-	logClientOptions(c, generalLogger)
+	buildInfo.Log(log)
+	logClientOptions(c, log)
 
 	if c.IsSet("proxy-dns") {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			errC <- runDNSProxyServer(c, dnsReadySignal, shutdownC, generalLogger)
+			errC <- runDNSProxyServer(c, dnsReadySignal, shutdownC, log)
 		}()
 	} else {
 		close(dnsReadySignal)
@@ -273,12 +272,12 @@ func StartServer(
 
 	go notifySystemd(connectedSignal)
 	if c.IsSet("pidfile") {
-		go writePidFile(connectedSignal, c.String("pidfile"), generalLogger)
+		go writePidFile(connectedSignal, c.String("pidfile"), log)
 	}
 
 	cloudflaredID, err := uuid.NewRandom()
 	if err != nil {
-		generalLogger.Errorf("Cannot generate cloudflared ID: %s", err)
+		log.Error().Msgf("Cannot generate cloudflared ID: %s", err)
 		return err
 	}
 
@@ -289,12 +288,12 @@ func StartServer(
 	}()
 
 	// update needs to be after DNS proxy is up to resolve equinox server address
-	if updater.IsAutoupdateEnabled(c, generalLogger) {
-		generalLogger.Infof("Autoupdate frequency is set to %v", c.Duration("autoupdate-freq"))
+	if updater.IsAutoupdateEnabled(c, log) {
+		log.Info().Msgf("Autoupdate frequency is set to %v", c.Duration("autoupdate-freq"))
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			autoupdater := updater.NewAutoUpdater(c.Duration("autoupdate-freq"), &listeners, generalLogger)
+			autoupdater := updater.NewAutoUpdater(c.Duration("autoupdate-freq"), &listeners, log)
 			errC <- autoupdater.Run(ctx)
 		}()
 	}
@@ -303,21 +302,18 @@ func StartServer(
 	if dnsProxyStandAlone(c) {
 		connectedSignal.Notify()
 		// no grace period, handle SIGINT/SIGTERM immediately
-		return waitToShutdown(&wg, errC, shutdownC, graceShutdownC, 0, generalLogger)
+		return waitToShutdown(&wg, errC, shutdownC, graceShutdownC, 0, log)
 	}
 
 	url := c.String("url")
 	hostname := c.String("hostname")
 	if url == hostname && url != "" && hostname != "" {
 		errText := "hostname and url shouldn't match. See --help for more information"
-		generalLogger.Error(errText)
+		log.Error().Msg(errText)
 		return fmt.Errorf(errText)
 	}
 
-	transportLogger, err := logger.CreateTransportLoggerFromContext(c, isUIEnabled)
-	if err != nil {
-		return errors.Wrap(err, "error setting up transport logger")
-	}
+	transportLog := logger.CreateTransportLoggerFromContext(c, isUIEnabled)
 
 	readinessCh := make(chan connection.Event, 16)
 	uiCh := make(chan connection.Event, 16)
@@ -325,30 +321,30 @@ func StartServer(
 		readinessCh,
 		uiCh,
 	}
-	tunnelConfig, ingressRules, err := prepareTunnelConfig(c, buildInfo, version, generalLogger, transportLogger, namedTunnel, isUIEnabled, eventChannels)
+	tunnelConfig, ingressRules, err := prepareTunnelConfig(c, buildInfo, version, log, transportLog, namedTunnel, isUIEnabled, eventChannels)
 	if err != nil {
-		generalLogger.Errorf("Couldn't start tunnel: %v", err)
+		log.Error().Msgf("Couldn't start tunnel: %v", err)
 		return err
 	}
 
 	metricsListener, err := listeners.Listen("tcp", c.String("metrics"))
 	if err != nil {
-		generalLogger.Errorf("Error opening metrics server listener: %s", err)
+		log.Error().Msgf("Error opening metrics server listener: %s", err)
 		return errors.Wrap(err, "Error opening metrics server listener")
 	}
 	defer metricsListener.Close()
 	wg.Add(1)
 	go func() {
 		defer wg.Done()
-		errC <- metrics.ServeMetrics(metricsListener, shutdownC, readinessCh, generalLogger)
+		errC <- metrics.ServeMetrics(metricsListener, shutdownC, readinessCh, log)
 	}()
 
-	ingressRules.StartOrigins(&wg, generalLogger, shutdownC, errC)
+	ingressRules.StartOrigins(&wg, log, shutdownC, errC)
 
 	reconnectCh := make(chan origin.ReconnectSignal, 1)
 	if c.IsSet("stdin-control") {
-		generalLogger.Info("Enabling control through stdin")
-		go stdinControl(reconnectCh, generalLogger)
+		log.Info().Msg("Enabling control through stdin")
+		go stdinControl(reconnectCh, log)
 	}
 
 	wg.Add(1)
@@ -365,31 +361,15 @@ func StartServer(
 			&ingressRules,
 			tunnelConfig.HAConnections,
 		)
-		logLevels, err := logger.ParseLevelString(c.String("loglevel"))
-		if err != nil {
-			return err
-		}
-		tunnelInfo.LaunchUI(ctx, generalLogger, transportLogger, logLevels, uiCh)
+		tunnelInfo.LaunchUI(ctx, log, transportLog, uiCh)
 	}
 
-	return waitToShutdown(&wg, errC, shutdownC, graceShutdownC, c.Duration("grace-period"), generalLogger)
-}
-
-// forceSetFlag attempts to set the given flag value in the closest context that has it defined
-func forceSetFlag(c *cli.Context, name, value string) {
-	for _, ctx := range c.Lineage() {
-		if err := ctx.Set(name, value); err == nil {
-			break
-		}
-	}
+	return waitToShutdown(&wg, errC, shutdownC, graceShutdownC, c.Duration("grace-period"), log)
 }
 
 func SetFlagsFromConfigFile(c *cli.Context) error {
 	const exitCode = 1
-	log, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
-	if err != nil {
-		return cliutil.PrintLoggerSetupError("error setting up logger", err)
-	}
+	log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
 	inputSource, err := config.ReadConfigFile(c, log)
 	if err != nil {
 		if err == config.ErrNoConfigFile {
@@ -411,20 +391,20 @@ func waitToShutdown(wg *sync.WaitGroup,
 	errC chan error,
 	shutdownC, graceShutdownC chan struct{},
 	gracePeriod time.Duration,
-	logger logger.Service,
+	log *zerolog.Logger,
 ) error {
 	var err error
 	if gracePeriod > 0 {
-		err = waitForSignalWithGraceShutdown(errC, shutdownC, graceShutdownC, gracePeriod, logger)
+		err = waitForSignalWithGraceShutdown(errC, shutdownC, graceShutdownC, gracePeriod, log)
 	} else {
-		err = waitForSignal(errC, shutdownC, logger)
+		err = waitForSignal(errC, shutdownC, log)
 		close(graceShutdownC)
 	}
 
 	if err != nil {
-		logger.Errorf("Quitting due to error: %s", err)
+		log.Error().Msgf("Quitting due to error: %s", err)
 	} else {
-		logger.Info("Quitting...")
+		log.Info().Msg("Quitting...")
 	}
 	// Wait for clean exit, discarding all errors
 	go func() {
@@ -440,16 +420,16 @@ func notifySystemd(waitForSignal *signal.Signal) {
 	daemon.SdNotify(false, "READY=1")
 }
 
-func writePidFile(waitForSignal *signal.Signal, pidFile string, logger logger.Service) {
+func writePidFile(waitForSignal *signal.Signal, pidFile string, log *zerolog.Logger) {
 	<-waitForSignal.Wait()
 	expandedPath, err := homedir.Expand(pidFile)
 	if err != nil {
-		logger.Errorf("Unable to expand %s, try to use absolute path in --pidfile: %s", pidFile, err)
+		log.Error().Msgf("Unable to expand %s, try to use absolute path in --pidfile: %s", pidFile, err)
 		return
 	}
 	file, err := os.Create(expandedPath)
 	if err != nil {
-		logger.Errorf("Unable to write pid to %s: %s", expandedPath, err)
+		log.Error().Msgf("Unable to write pid to %s: %s", expandedPath, err)
 		return
 	}
 	defer file.Close()
@@ -1018,7 +998,7 @@ func configureProxyDNSFlags(shouldHide bool) []cli.Flag {
 	}
 }
 
-func stdinControl(reconnectCh chan origin.ReconnectSignal, logger logger.Service) {
+func stdinControl(reconnectCh chan origin.ReconnectSignal, log *zerolog.Logger) {
 	for {
 		scanner := bufio.NewScanner(os.Stdin)
 		for scanner.Scan() {
@@ -1033,17 +1013,17 @@ func stdinControl(reconnectCh chan origin.ReconnectSignal, logger logger.Service
 				if len(parts) > 1 {
 					var err error
 					if reconnect.Delay, err = time.ParseDuration(parts[1]); err != nil {
-						logger.Error(err.Error())
+						log.Error().Msg(err.Error())
 						continue
 					}
 				}
-				logger.Infof("Sending reconnect signal %+v", reconnect)
+				log.Info().Msgf("Sending reconnect signal %+v", reconnect)
 				reconnectCh <- reconnect
 			default:
-				logger.Infof("Unknown command: %s", command)
+				log.Info().Msgf("Unknown command: %s", command)
 				fallthrough
 			case "help":
-				logger.Info(`Supported command:
+				log.Info().Msg(`Supported command:
 			reconnect [delay]
 			- restarts one randomly chosen connection with optional delay before reconnect`)
 			}

@@ -14,7 +14,6 @@ import (
 	"github.com/cloudflare/cloudflared/edgediscovery"
 	"github.com/cloudflare/cloudflared/h2mux"
 	"github.com/cloudflare/cloudflared/ingress"
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/cloudflare/cloudflared/origin"
 	"github.com/cloudflare/cloudflared/tlsconfig"
 	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
@@ -23,6 +22,7 @@ import (
 	"github.com/google/uuid"
 	"github.com/mitchellh/go-homedir"
 	"github.com/pkg/errors"
+	"github.com/rs/zerolog"
 	"github.com/urfave/cli/v2"
 	"golang.org/x/crypto/ssh/terminal"
 )
@@ -46,16 +46,16 @@ func findDefaultOriginCertPath() string {
 	return ""
 }
 
-func generateRandomClientID(logger logger.Service) (string, error) {
+func generateRandomClientID(log *zerolog.Logger) (string, error) {
 	u, err := uuid.NewRandom()
 	if err != nil {
-		logger.Errorf("couldn't create UUID for client ID %s", err)
+		log.Error().Msgf("couldn't create UUID for client ID %s", err)
 		return "", err
 	}
 	return u.String(), nil
 }
 
-func logClientOptions(c *cli.Context, logger logger.Service) {
+func logClientOptions(c *cli.Context, log *zerolog.Logger) {
 	flags := make(map[string]interface{})
 	for _, flag := range c.LocalFlagNames() {
 		flags[flag] = c.Generic(flag)
@@ -69,7 +69,7 @@ func logClientOptions(c *cli.Context, logger logger.Service) {
 	}
 
 	if len(flags) > 0 {
-		logger.Infof("Environment variables %v", flags)
+		log.Info().Msgf("Environment variables %v", flags)
 	}
 
 	envs := make(map[string]string)
@@ -84,7 +84,7 @@ func logClientOptions(c *cli.Context, logger logger.Service) {
 		}
 	}
 	if len(envs) > 0 {
-		logger.Infof("Environmental variables %v", envs)
+		log.Info().Msgf("Environmental variables %v", envs)
 	}
 }
 
@@ -92,32 +92,32 @@ func dnsProxyStandAlone(c *cli.Context) bool {
 	return c.IsSet("proxy-dns") && (!c.IsSet("hostname") && !c.IsSet("tag") && !c.IsSet("hello-world"))
 }
 
-func findOriginCert(c *cli.Context, logger logger.Service) (string, error) {
+func findOriginCert(c *cli.Context, log *zerolog.Logger) (string, error) {
 	originCertPath := c.String("origincert")
 	if originCertPath == "" {
-		logger.Infof("Cannot determine default origin certificate path. No file %s in %v", config.DefaultCredentialFile, config.DefaultConfigSearchDirectories())
+		log.Info().Msgf("Cannot determine default origin certificate path. No file %s in %v", config.DefaultCredentialFile, config.DefaultConfigSearchDirectories())
 		if isRunningFromTerminal() {
-			logger.Errorf("You need to specify the origin certificate path with --origincert option, or set TUNNEL_ORIGIN_CERT environment variable. See %s for more information.", argumentsUrl)
+			log.Error().Msgf("You need to specify the origin certificate path with --origincert option, or set TUNNEL_ORIGIN_CERT environment variable. See %s for more information.", argumentsUrl)
 			return "", fmt.Errorf("Client didn't specify origincert path when running from terminal")
 		} else {
-			logger.Errorf("You need to specify the origin certificate path by specifying the origincert option in the configuration file, or set TUNNEL_ORIGIN_CERT environment variable. See %s for more information.", serviceUrl)
+			log.Error().Msgf("You need to specify the origin certificate path by specifying the origincert option in the configuration file, or set TUNNEL_ORIGIN_CERT environment variable. See %s for more information.", serviceUrl)
 			return "", fmt.Errorf("Client didn't specify origincert path")
 		}
 	}
 	var err error
 	originCertPath, err = homedir.Expand(originCertPath)
 	if err != nil {
-		logger.Errorf("Cannot resolve path %s: %s", originCertPath, err)
+		log.Error().Msgf("Cannot resolve path %s: %s", originCertPath, err)
 		return "", fmt.Errorf("Cannot resolve path %s", originCertPath)
 	}
 	// Check that the user has acquired a certificate using the login command
 	ok, err := config.FileExists(originCertPath)
 	if err != nil {
-		logger.Errorf("Cannot check if origin cert exists at path %s", originCertPath)
+		log.Error().Msgf("Cannot check if origin cert exists at path %s", originCertPath)
 		return "", fmt.Errorf("Cannot check if origin cert exists at path %s", originCertPath)
 	}
 	if !ok {
-		logger.Errorf(`Cannot find a valid certificate for your origin at the path:
+		log.Error().Msgf(`Cannot find a valid certificate for your origin at the path:
 
 	%s
 
@@ -132,23 +132,23 @@ If you don't have a certificate signed by Cloudflare, run the command:
 	return originCertPath, nil
 }
 
-func readOriginCert(originCertPath string, logger logger.Service) ([]byte, error) {
-	logger.Debugf("Reading origin cert from %s", originCertPath)
+func readOriginCert(originCertPath string, log *zerolog.Logger) ([]byte, error) {
+	log.Debug().Msgf("Reading origin cert from %s", originCertPath)
 
 	// Easier to send the certificate as []byte via RPC than decoding it at this point
 	originCert, err := ioutil.ReadFile(originCertPath)
 	if err != nil {
-		logger.Errorf("Cannot read %s to load origin certificate: %s", originCertPath, err)
+		log.Error().Msgf("Cannot read %s to load origin certificate: %s", originCertPath, err)
 		return nil, fmt.Errorf("Cannot read %s to load origin certificate", originCertPath)
 	}
 	return originCert, nil
 }
 
-func getOriginCert(c *cli.Context, logger logger.Service) ([]byte, error) {
-	if originCertPath, err := findOriginCert(c, logger); err != nil {
+func getOriginCert(c *cli.Context, log *zerolog.Logger) ([]byte, error) {
+	if originCertPath, err := findOriginCert(c, log); err != nil {
 		return nil, err
 	} else {
-		return readOriginCert(originCertPath, logger)
+		return readOriginCert(originCertPath, log)
 	}
 }
 
@@ -156,8 +156,8 @@ func prepareTunnelConfig(
 	c *cli.Context,
 	buildInfo *buildinfo.BuildInfo,
 	version string,
-	logger logger.Service,
-	transportLogger logger.Service,
+	log *zerolog.Logger,
+	transportLogger *zerolog.Logger,
 	namedTunnel *connection.NamedTunnelConfig,
 	isUIEnabled bool,
 	eventChans []chan connection.Event,
@@ -166,13 +166,13 @@ func prepareTunnelConfig(
 
 	hostname, err := validation.ValidateHostname(c.String("hostname"))
 	if err != nil {
-		logger.Errorf("Invalid hostname: %s", err)
+		log.Error().Msgf("Invalid hostname: %s", err)
 		return nil, ingress.Ingress{}, errors.Wrap(err, "Invalid hostname")
 	}
 	isFreeTunnel := hostname == ""
 	clientID := c.String("id")
 	if !c.IsSet("id") {
-		clientID, err = generateRandomClientID(logger)
+		clientID, err = generateRandomClientID(log)
 		if err != nil {
 			return nil, ingress.Ingress{}, err
 		}
@@ -180,7 +180,7 @@ func prepareTunnelConfig(
 
 	tags, err := NewTagSliceFromCLI(c.StringSlice("tag"))
 	if err != nil {
-		logger.Errorf("Tag parse failure: %s", err)
+		log.Error().Msgf("Tag parse failure: %s", err)
 		return nil, ingress.Ingress{}, errors.Wrap(err, "Tag parse failure")
 	}
 
@@ -188,7 +188,7 @@ func prepareTunnelConfig(
 
 	var originCert []byte
 	if !isFreeTunnel {
-		originCert, err = getOriginCert(c, logger)
+		originCert, err = getOriginCert(c, log)
 		if err != nil {
 			return nil, ingress.Ingress{}, errors.Wrap(err, "Error getting origin cert")
 		}
@@ -227,17 +227,17 @@ func prepareTunnelConfig(
 
 	// Convert single-origin configuration into multi-origin configuration.
 	if ingressRules.IsEmpty() {
-		ingressRules, err = ingress.NewSingleOrigin(c, !isNamedTunnel, logger)
+		ingressRules, err = ingress.NewSingleOrigin(c, !isNamedTunnel)
 		if err != nil {
 			return nil, ingress.Ingress{}, err
 		}
 	}
 
-	protocolSelector, err := connection.NewProtocolSelector(c.String("protocol"), namedTunnel, edgediscovery.HTTP2Percentage, origin.ResolveTTL, logger)
+	protocolSelector, err := connection.NewProtocolSelector(c.String("protocol"), namedTunnel, edgediscovery.HTTP2Percentage, origin.ResolveTTL, log)
 	if err != nil {
 		return nil, ingress.Ingress{}, err
 	}
-	logger.Infof("Initial protocol %s", protocolSelector.Current())
+	log.Info().Msgf("Initial protocol %s", protocolSelector.Current())
 
 	edgeTLSConfigs := make(map[connection.Protocol]*tls.Config, len(connection.ProtocolList))
 	for _, p := range connection.ProtocolList {
@@ -248,7 +248,7 @@ func prepareTunnelConfig(
 		edgeTLSConfigs[p] = edgeTLSConfig
 	}
 
-	originClient := origin.NewClient(ingressRules, tags, logger)
+	originClient := origin.NewClient(ingressRules, tags, log)
 	connectionConfig := &connection.Config{
 		OriginClient: originClient,
 		GracePeriod:  c.Duration("grace-period"),
@@ -272,7 +272,7 @@ func prepareTunnelConfig(
 		IsFreeTunnel: isFreeTunnel,
 		LBPool:       c.String("lb-pool"),
 		Tags:         tags,
-		Logger:       logger,
+		Log:          log,
 		Observer:     connection.NewObserver(transportLogger, eventChans, isUIEnabled),
 		ReportedVersion: version,
 		Retries:         c.Uint("retries"),

@@ -5,8 +5,9 @@ import (
 	"path/filepath"
 
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/google/uuid"
+	"github.com/rs/zerolog"
 	"github.com/urfave/cli/v2"
 )
 
@@ -39,25 +40,25 @@ func (a staticPath) Path() (string, error) {
 // Implements CredFinder and looks for the credentials file in several directories
 // searching for a file named <id>.json
 type searchByID struct {
 	id uuid.UUID
 	c *cli.Context
-	logger logger.Service
+	log *zerolog.Logger
 	fs fileSystem
 }
 
-func newSearchByID(id uuid.UUID, c *cli.Context, logger logger.Service, fs fileSystem) CredFinder {
+func newSearchByID(id uuid.UUID, c *cli.Context, log *zerolog.Logger, fs fileSystem) CredFinder {
 	return searchByID{
 		id: id,
 		c: c,
-		logger: logger,
+		log: log,
 		fs: fs,
 	}
 }
 
 func (s searchByID) Path() (string, error) {
 
 	// Fallback to look for tunnel credentials in the origin cert directory
-	if originCertPath, err := findOriginCert(s.c, s.logger); err == nil {
+	if originCertPath, err := findOriginCert(s.c, s.log); err == nil {
 		originCertDir := filepath.Dir(originCertPath)
 		if filePath, err := tunnelFilePath(s.id, originCertDir); err == nil {
 			if s.fs.validFilePath(filePath) {
@@ -4,12 +4,12 @@ import (
 	"fmt"
 	"net/url"
 
-	"github.com/pkg/errors"
-	"github.com/urfave/cli/v2"
-
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
 	"github.com/cloudflare/cloudflared/ingress"
+
+	"github.com/pkg/errors"
+	"github.com/urfave/cli/v2"
 )
 
 func buildIngressSubcommand() *cli.Command {
@@ -8,9 +8,9 @@ import (
 	"path/filepath"
 	"syscall"
 
-	homedir "github.com/mitchellh/go-homedir"
+	"github.com/mitchellh/go-homedir"
 	"github.com/pkg/errors"
-	cli "github.com/urfave/cli/v2"
+	"github.com/urfave/cli/v2"
 
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
@@ -40,10 +40,7 @@ func buildLoginSubcommand(hidden bool) *cli.Command {
 }
 
 func login(c *cli.Context) error {
-	logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
-	if err != nil {
-		return errors.Wrap(err, "error setting up logger")
-	}
+	log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
 
 	path, ok, err := checkForExistingCert()
 	if ok {
@@ -59,7 +56,15 @@ func login(c *cli.Context) error {
 		return err
 	}
 
-	resourceData, err := transfer.Run(loginURL, "cert", "callback", callbackStoreURL, false, false, logger)
+	resourceData, err := transfer.Run(
+		loginURL,
+		"cert",
+		"callback",
+		callbackStoreURL,
+		false,
+		false,
+		log,
+	)
 	if err != nil {
 		fmt.Fprintf(os.Stderr, "Failed to write the certificate due to the following error:\n%v\n\nYour browser will download the certificate instead. You will have to manually\ncopy it to the following path:\n\n%s\n", err, path)
 		return err
@@ -1,20 +1,19 @@
 package tunnel
 
 import (
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/cloudflare/cloudflared/tunneldns"
 
-	"github.com/urfave/cli/v2"
-
 	"github.com/pkg/errors"
+	"github.com/rs/zerolog"
+	"github.com/urfave/cli/v2"
 )
 
-func runDNSProxyServer(c *cli.Context, dnsReadySignal, shutdownC chan struct{}, logger logger.Service) error {
+func runDNSProxyServer(c *cli.Context, dnsReadySignal, shutdownC chan struct{}, log *zerolog.Logger) error {
 	port := c.Int("proxy-dns-port")
 	if port <= 0 || port > 65535 {
 		return errors.New("The 'proxy-dns-port' must be a valid port number in <1, 65535> range.")
 	}
-	listener, err := tunneldns.CreateListener(c.String("proxy-dns-address"), uint16(port), c.StringSlice("proxy-dns-upstream"), c.StringSlice("proxy-dns-bootstrap"), logger)
+	listener, err := tunneldns.CreateListener(c.String("proxy-dns-address"), uint16(port), c.StringSlice("proxy-dns-upstream"), c.StringSlice("proxy-dns-bootstrap"), log)
 	if err != nil {
 		close(dnsReadySignal)
 		listener.Stop()
@@ -26,6 +25,6 @@ func runDNSProxyServer(c *cli.Context, dnsReadySignal, shutdownC chan struct{},
 		return errors.Wrap(err, "Cannot start the DNS over HTTPS proxy server")
 	}
 	<-shutdownC
-	listener.Stop()
+	_ = listener.Stop()
 	return nil
 }
@@ -6,24 +6,24 @@ import (
 	"syscall"
 	"time"
 
-	"github.com/cloudflare/cloudflared/logger"
+	"github.com/rs/zerolog"
 )
 
 // waitForSignal notifies all routines to shutdownC immediately by closing the
 // shutdownC when one of the routines in main exits, or when this process receives
 // SIGTERM/SIGINT
-func waitForSignal(errC chan error, shutdownC chan struct{}, logger logger.Service) error {
+func waitForSignal(errC chan error, shutdownC chan struct{}, log *zerolog.Logger) error {
 	signals := make(chan os.Signal, 10)
 	signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT)
 	defer signal.Stop(signals)
 
 	select {
 	case err := <-errC:
-		logger.Infof("terminating due to error: %v", err)
+		log.Info().Msgf("terminating due to error: %v", err)
 		close(shutdownC)
 		return err
 	case s := <-signals:
-		logger.Infof("terminating due to signal %s", s)
+		log.Info().Msgf("terminating due to signal %s", s)
 		close(shutdownC)
 	case <-shutdownC:
 	}
@@ -41,7 +41,7 @@ func waitForSignal(errC chan error, shutdownC chan struct{}, logger logger.Servi
 func waitForSignalWithGraceShutdown(errC chan error,
 	shutdownC, graceShutdownC chan struct{},
 	gracePeriod time.Duration,
-	logger logger.Service,
+	logger *zerolog.Logger,
 ) error {
 	signals := make(chan os.Signal, 10)
 	signal.Notify(signals, syscall.SIGTERM, syscall.SIGINT)
@@ -49,16 +49,16 @@ func waitForSignalWithGraceShutdown(errC chan error,
 
 	select {
 	case err := <-errC:
-		logger.Infof("Initiating graceful shutdown due to %v ...", err)
+		logger.Info().Msgf("Initiating graceful shutdown due to %v ...", err)
 		close(graceShutdownC)
 		close(shutdownC)
 		return err
 	case s := <-signals:
-		logger.Infof("Initiating graceful shutdown due to signal %s ...", s)
+		logger.Info().Msgf("Initiating graceful shutdown due to signal %s ...", s)
 		close(graceShutdownC)
-		waitForGracePeriod(signals, errC, shutdownC, gracePeriod, logger)
+		waitForGracePeriod(signals, errC, shutdownC, gracePeriod)
 	case <-graceShutdownC:
-		waitForGracePeriod(signals, errC, shutdownC, gracePeriod, logger)
+		waitForGracePeriod(signals, errC, shutdownC, gracePeriod)
 	case <-shutdownC:
 		close(graceShutdownC)
 	}
@@ -70,7 +70,6 @@ func waitForGracePeriod(signals chan os.Signal,
 	errC chan error,
 	shutdownC chan struct{},
 	gracePeriod time.Duration,
-	logger logger.Service,
 ) {
 	// Unregister signal handler early, so the client can send a second SIGTERM/SIGINT
 	// to force shutdown cloudflared
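Since `zerolog.New` and `zerolog.Nop` return a `Logger` value while the rewritten signatures above accept `*zerolog.Logger`, call sites pass the logger's address. A small hedged sketch of that calling convention (the `notify` helper is hypothetical):

package main

import (
	"os"

	"github.com/rs/zerolog"
)

// notify mirrors the signatures in this commit: the logger is passed by pointer.
func notify(log *zerolog.Logger, signalName string) {
	log.Info().Msgf("terminating due to signal %s", signalName)
}

func main() {
	log := zerolog.New(os.Stderr) // returns a Logger value
	notify(&log, "SIGTERM")       // so callers take its address
}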
@@ -2,11 +2,11 @@ package tunnel
 
 import (
 	"fmt"
+	"github.com/rs/zerolog"
 	"syscall"
 	"testing"
 	"time"
 
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -28,7 +28,7 @@ func testChannelClosed(t *testing.T, c chan struct{}) {
 }
 
 func TestWaitForSignal(t *testing.T) {
-	logger := logger.NewOutputWriter(logger.NewMockWriteManager())
+	log := zerolog.Nop()
 
 	// Test handling server error
 	errC := make(chan error)
|
||||||
}()
|
}()
|
||||||
|
|
||||||
// received error, shutdownC should be closed
|
// received error, shutdownC should be closed
|
||||||
err := waitForSignal(errC, shutdownC, logger)
|
err := waitForSignal(errC, shutdownC, &log)
|
||||||
assert.Equal(t, serverErr, err)
|
assert.Equal(t, serverErr, err)
|
||||||
testChannelClosed(t, shutdownC)
|
testChannelClosed(t, shutdownC)
|
||||||
|
|
||||||
|
@ -56,10 +56,10 @@ func TestWaitForSignal(t *testing.T) {
|
||||||
go func(sig syscall.Signal) {
|
go func(sig syscall.Signal) {
|
||||||
// sleep for a tick to prevent sending signal before calling waitForSignal
|
// sleep for a tick to prevent sending signal before calling waitForSignal
|
||||||
time.Sleep(tick)
|
time.Sleep(tick)
|
||||||
syscall.Kill(syscall.Getpid(), sig)
|
_ = syscall.Kill(syscall.Getpid(), sig)
|
||||||
}(sig)
|
}(sig)
|
||||||
|
|
||||||
err = waitForSignal(errC, shutdownC, logger)
|
err = waitForSignal(errC, shutdownC, &log)
|
||||||
assert.Equal(t, nil, err)
|
assert.Equal(t, nil, err)
|
||||||
assert.Equal(t, shutdownErr, <-errC)
|
assert.Equal(t, shutdownErr, <-errC)
|
||||||
testChannelClosed(t, shutdownC)
|
testChannelClosed(t, shutdownC)
|
||||||
|
@ -76,10 +76,10 @@ func TestWaitForSignalWithGraceShutdown(t *testing.T) {
|
||||||
errC <- serverErr
|
errC <- serverErr
|
||||||
}()
|
}()
|
||||||
|
|
||||||
logger := logger.NewOutputWriter(logger.NewMockWriteManager())
|
log := zerolog.Nop()
|
||||||
|
|
||||||
// received error, both shutdownC and graceshutdownC should be closed
|
// received error, both shutdownC and graceshutdownC should be closed
|
||||||
err := waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, logger)
|
err := waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, &log)
|
||||||
assert.Equal(t, serverErr, err)
|
assert.Equal(t, serverErr, err)
|
||||||
testChannelClosed(t, shutdownC)
|
testChannelClosed(t, shutdownC)
|
||||||
testChannelClosed(t, graceshutdownC)
|
testChannelClosed(t, graceshutdownC)
|
||||||
|
@ -89,7 +89,7 @@ func TestWaitForSignalWithGraceShutdown(t *testing.T) {
|
||||||
shutdownC = make(chan struct{})
|
shutdownC = make(chan struct{})
|
||||||
graceshutdownC = make(chan struct{})
|
graceshutdownC = make(chan struct{})
|
||||||
close(shutdownC)
|
close(shutdownC)
|
||||||
err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, logger)
|
err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, &log)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
testChannelClosed(t, shutdownC)
|
testChannelClosed(t, shutdownC)
|
||||||
testChannelClosed(t, graceshutdownC)
|
testChannelClosed(t, graceshutdownC)
|
||||||
|
@ -99,7 +99,7 @@ func TestWaitForSignalWithGraceShutdown(t *testing.T) {
|
||||||
shutdownC = make(chan struct{})
|
shutdownC = make(chan struct{})
|
||||||
graceshutdownC = make(chan struct{})
|
graceshutdownC = make(chan struct{})
|
||||||
close(graceshutdownC)
|
close(graceshutdownC)
|
||||||
err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, logger)
|
err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, &log)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
testChannelClosed(t, shutdownC)
|
testChannelClosed(t, shutdownC)
|
||||||
testChannelClosed(t, graceshutdownC)
|
testChannelClosed(t, graceshutdownC)
|
||||||
|
@ -119,10 +119,10 @@ func TestWaitForSignalWithGraceShutdown(t *testing.T) {
|
||||||
go func(sig syscall.Signal) {
|
go func(sig syscall.Signal) {
|
||||||
// sleep for a tick to prevent sending signal before calling waitForSignalWithGraceShutdown
|
// sleep for a tick to prevent sending signal before calling waitForSignalWithGraceShutdown
|
||||||
time.Sleep(tick)
|
time.Sleep(tick)
|
||||||
syscall.Kill(syscall.Getpid(), sig)
|
_ = syscall.Kill(syscall.Getpid(), sig)
|
||||||
}(sig)
|
}(sig)
|
||||||
|
|
||||||
err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, logger)
|
err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, &log)
|
||||||
assert.Equal(t, nil, err)
|
assert.Equal(t, nil, err)
|
||||||
assert.Equal(t, graceShutdownErr, <-errC)
|
assert.Equal(t, graceShutdownErr, <-errC)
|
||||||
testChannelClosed(t, shutdownC)
|
testChannelClosed(t, shutdownC)
|
||||||
|
@ -145,10 +145,10 @@ func TestWaitForSignalWithGraceShutdown(t *testing.T) {
|
||||||
go func(sig syscall.Signal) {
|
go func(sig syscall.Signal) {
|
||||||
// sleep for a tick to prevent sending signal before calling waitForSignalWithGraceShutdown
|
// sleep for a tick to prevent sending signal before calling waitForSignalWithGraceShutdown
|
||||||
time.Sleep(tick)
|
time.Sleep(tick)
|
||||||
syscall.Kill(syscall.Getpid(), sig)
|
_ = syscall.Kill(syscall.Getpid(), sig)
|
||||||
}(sig)
|
}(sig)
|
||||||
|
|
||||||
err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, logger)
|
err = waitForSignalWithGraceShutdown(errC, shutdownC, graceshutdownC, tick, &log)
|
||||||
assert.Equal(t, nil, err)
|
assert.Equal(t, nil, err)
|
||||||
assert.Equal(t, shutdownErr, <-errC)
|
assert.Equal(t, shutdownErr, <-errC)
|
||||||
testChannelClosed(t, shutdownC)
|
testChannelClosed(t, shutdownC)
|
||||||
|
|
|
@@ -8,6 +8,7 @@ import (
 
 	"github.com/google/uuid"
 	"github.com/pkg/errors"
+	"github.com/rs/zerolog"
 	"github.com/urfave/cli/v2"
 
 	"github.com/cloudflare/cloudflared/certutil"
@@ -29,7 +30,7 @@ func (e errInvalidJSONCredential) Error() string {
 // pass between subcommands, and make sure they are only initialized once
 type subcommandContext struct {
 	c *cli.Context
-	logger logger.Service
+	log *zerolog.Logger
 	isUIEnabled bool
 	fs fileSystem
 
@@ -42,14 +43,11 @@ func newSubcommandContext(c *cli.Context) (*subcommandContext, error) {
 	isUIEnabled := c.IsSet(uiFlag) && c.String("name") != ""
 
 	// If UI is enabled, terminal log output should be disabled -- log should be written into a UI log window instead
-	logger, err := logger.CreateLoggerFromContext(c, isUIEnabled)
-	if err != nil {
-		return nil, errors.Wrap(err, "error setting up logger")
-	}
+	log := logger.CreateLoggerFromContext(c, isUIEnabled)
 
 	return &subcommandContext{
 		c: c,
-		logger: logger,
+		log: log,
 		isUIEnabled: isUIEnabled,
 		fs: realFileSystem{},
 	}, nil
@@ -60,7 +58,7 @@ func (sc *subcommandContext) credentialFinder(tunnelID uuid.UUID) CredFinder {
 	if path := sc.c.String(CredFileFlag); path != "" {
 		return newStaticPath(path, sc.fs)
 	}
-	return newSearchByID(tunnelID, sc.c, sc.logger, sc.fs)
+	return newSearchByID(tunnelID, sc.c, sc.log, sc.fs)
 }
 
 type userCredential struct {
@@ -77,7 +75,15 @@ func (sc *subcommandContext) client() (tunnelstore.Client, error) {
 		return nil, err
 	}
 	userAgent := fmt.Sprintf("cloudflared/%s", version)
-	client, err := tunnelstore.NewRESTClient(sc.c.String("api-url"), credential.cert.AccountID, credential.cert.ZoneID, credential.cert.ServiceKey, userAgent, sc.logger)
+	client, err := tunnelstore.NewRESTClient(
+		sc.c.String("api-url"),
+		credential.cert.AccountID,
+		credential.cert.ZoneID,
+		credential.cert.ServiceKey,
+		userAgent,
+		sc.log,
+	)
+
 	if err != nil {
 		return nil, err
 	}
@@ -87,11 +93,11 @@ func (sc *subcommandContext) client() (tunnelstore.Client, error) {
 
 func (sc *subcommandContext) credential() (*userCredential, error) {
 	if sc.userCredential == nil {
-		originCertPath, err := findOriginCert(sc.c, sc.logger)
+		originCertPath, err := findOriginCert(sc.c, sc.log)
 		if err != nil {
 			return nil, errors.Wrap(err, "Error locating origin cert")
 		}
-		blocks, err := readOriginCert(originCertPath, sc.logger)
+		blocks, err := readOriginCert(originCertPath, sc.log)
 		if err != nil {
 			return nil, errors.Wrapf(err, "Can't read origin cert from %s", originCertPath)
 		}
@@ -163,7 +169,7 @@ func (sc *subcommandContext) create(name string) (*tunnelstore.Tunnel, error) {
 		TunnelName: name,
 	}
 	filePath, writeFileErr := writeTunnelCredentials(credential.certPath, &tunnelCredentials)
-	if err != nil {
+	if writeFileErr != nil {
 		var errorLines []string
 		errorLines = append(errorLines, fmt.Sprintf("Your tunnel '%v' was created with ID %v. However, cloudflared couldn't write to the tunnel credentials file at %v.json.", tunnel.Name, tunnel.ID, tunnel.ID))
 		errorLines = append(errorLines, fmt.Sprintf("The file-writing error is: %v", writeFileErr))
@@ -176,13 +182,13 @@ func (sc *subcommandContext) create(name string) (*tunnelstore.Tunnel, error) {
 		errorMsg := strings.Join(errorLines, "\n")
 		return nil, errors.New(errorMsg)
 	}
-	sc.logger.Infof("Tunnel credentials written to %v. cloudflared chose this file based on where your origin certificate was found. Keep this file secret. To revoke these credentials, delete the tunnel.", filePath)
+	sc.log.Info().Msgf("Tunnel credentials written to %v. cloudflared chose this file based on where your origin certificate was found. Keep this file secret. To revoke these credentials, delete the tunnel.", filePath)
 
 	if outputFormat := sc.c.String(outputFormatFlag.Name); outputFormat != "" {
 		return nil, renderOutput(outputFormat, &tunnel)
 	}
 
-	sc.logger.Infof("Created tunnel %s with id %s", tunnel.Name, tunnel.ID)
+	sc.log.Info().Msgf("Created tunnel %s with id %s", tunnel.Name, tunnel.ID)
 	return tunnel, nil
 }
 
@@ -230,7 +236,7 @@ func (sc *subcommandContext) delete(tunnelIDs []uuid.UUID) error {
 		credFinder := sc.credentialFinder(id)
 		if tunnelCredentialsPath, err := credFinder.Path(); err == nil {
 			if err = os.Remove(tunnelCredentialsPath); err != nil {
-				sc.logger.Infof("Tunnel %v was deleted, but we could not remove its credentials file %s: %s. Consider deleting this file manually.", id, tunnelCredentialsPath, err)
+				sc.log.Info().Msgf("Tunnel %v was deleted, but we could not remove its credentials file %s: %s. Consider deleting this file manually.", id, tunnelCredentialsPath, err)
 			}
 		}
 	}
@@ -254,18 +260,19 @@ func (sc *subcommandContext) run(tunnelID uuid.UUID) error {
 	credentials, err := sc.findCredentials(tunnelID)
 	if err != nil {
 		if e, ok := err.(errInvalidJSONCredential); ok {
-			sc.logger.Errorf("The credentials file at %s contained invalid JSON. This is probably caused by passing the wrong filepath. Reminder: the credentials file is a .json file created via `cloudflared tunnel create`.", e.path)
-			sc.logger.Errorf("Invalid JSON when parsing credentials file: %s", e.err.Error())
+			sc.log.Error().Msgf("The credentials file at %s contained invalid JSON. This is probably caused by passing the wrong filepath. Reminder: the credentials file is a .json file created via `cloudflared tunnel create`.", e.path)
+			sc.log.Error().Msgf("Invalid JSON when parsing credentials file: %s", e.err.Error())
 		}
 		return err
 	}
 
 	return StartServer(
 		sc.c,
 		version,
 		shutdownC,
 		graceShutdownC,
 		&connection.NamedTunnelConfig{Credentials: credentials},
-		sc.logger,
+		sc.log,
 		sc.isUIEnabled,
 	)
 }
@@ -276,9 +283,9 @@ func (sc *subcommandContext) cleanupConnections(tunnelIDs []uuid.UUID) error {
 		return err
 	}
 	for _, tunnelID := range tunnelIDs {
-		sc.logger.Infof("Cleanup connection for tunnel %s", tunnelID)
+		sc.log.Info().Msgf("Cleanup connection for tunnel %s", tunnelID)
 		if err := client.CleanupConnections(tunnelID); err != nil {
-			sc.logger.Errorf("Error cleaning up connections for tunnel %v, error :%v", tunnelID, err)
+			sc.log.Error().Msgf("Error cleaning up connections for tunnel %v, error :%v", tunnelID, err)
 		}
 	}
 	return nil
@@ -4,16 +4,15 @@ import (
 	"encoding/base64"
 	"flag"
 	"fmt"
+	"github.com/rs/zerolog"
 	"reflect"
 	"testing"
 	"time"
 
 	"github.com/cloudflare/cloudflared/connection"
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/cloudflare/cloudflared/tunnelstore"
 	"github.com/google/uuid"
 	"github.com/pkg/errors"
-	"github.com/stretchr/testify/require"
 	"github.com/urfave/cli/v2"
 )
 
@@ -106,7 +105,7 @@ func (fs mockFileSystem) readFile(filePath string) ([]byte, error) {
 func Test_subcommandContext_findCredentials(t *testing.T) {
 	type fields struct {
 		c *cli.Context
-		logger logger.Service
+		log *zerolog.Logger
 		isUIEnabled bool
 		fs fileSystem
 		tunnelstoreClient tunnelstore.Client
@@ -137,8 +136,7 @@ func Test_subcommandContext_findCredentials(t *testing.T) {
 		},
 		vfp: func(string) bool { return true },
 	}
-	logger, err := logger.New()
-	require.NoError(t, err)
+	log := zerolog.Nop()
 
 	tests := []struct {
 		name string
@@ -150,13 +148,13 @@ func Test_subcommandContext_findCredentials(t *testing.T) {
 		{
 			name: "Filepath given leads to old credentials file",
 			fields: fields{
-				logger: logger,
+				log: &log,
 				fs: fs,
 				c: func() *cli.Context {
 					flagSet := flag.NewFlagSet("test0", flag.PanicOnError)
 					flagSet.String(CredFileFlag, oldCertPath, "")
 					c := cli.NewContext(cli.NewApp(), flagSet, nil)
-					err = c.Set(CredFileFlag, oldCertPath)
+					_ = c.Set(CredFileFlag, oldCertPath)
 					return c
 				}(),
 			},
@@ -172,13 +170,13 @@ func Test_subcommandContext_findCredentials(t *testing.T) {
 		{
 			name: "Filepath given leads to new credentials file",
 			fields: fields{
-				logger: logger,
+				log: &log,
 				fs: fs,
 				c: func() *cli.Context {
 					flagSet := flag.NewFlagSet("test0", flag.PanicOnError)
 					flagSet.String(CredFileFlag, newCertPath, "")
 					c := cli.NewContext(cli.NewApp(), flagSet, nil)
-					err = c.Set(CredFileFlag, newCertPath)
+					_ = c.Set(CredFileFlag, newCertPath)
 					return c
 				}(),
 			},
@@ -197,7 +195,7 @@ func Test_subcommandContext_findCredentials(t *testing.T) {
 		t.Run(tt.name, func(t *testing.T) {
 			sc := &subcommandContext{
 				c: tt.fields.c,
-				logger: tt.fields.logger,
+				log: tt.fields.log,
 				isUIEnabled: tt.fields.isUIEnabled,
 				fs: tt.fields.fs,
 				tunnelstoreClient: tt.fields.tunnelstoreClient,
@@ -223,7 +223,7 @@ func fmtAndPrintTunnelList(tunnels []*tunnelstore.Tunnel, showRecentlyDisconnect
 	defer writer.Flush()
 
 	// Print column headers with tabbed columns
-	fmt.Fprintln(writer, "ID\tNAME\tCREATED\tCONNECTIONS\t")
+	_, _ = fmt.Fprintln(writer, "ID\tNAME\tCREATED\tCONNECTIONS\t")
 
 	// Loop through tunnels, create formatted string for each, and print using tabwriter
 	for _, t := range tunnels {
@@ -234,7 +234,7 @@ func fmtAndPrintTunnelList(tunnels []*tunnelstore.Tunnel, showRecentlyDisconnect
 			t.CreatedAt.Format(time.RFC3339),
 			fmtConnections(t.Connections, showRecentlyDisconnected),
 		)
-		fmt.Fprintln(writer, formattedStr)
+		_, _ = fmt.Fprintln(writer, formattedStr)
 	}
 }
 
@@ -360,7 +360,7 @@ func runNamedTunnel(sc *subcommandContext, tunnelRef string) error {
 		return errors.Wrap(err, "error parsing tunnel ID")
 	}
 
-	sc.logger.Infof("Starting tunnel %s", tunnelID.String())
+	sc.log.Info().Msgf("Starting tunnel %s", tunnelID.String())
 
 	return sc.run(tunnelID)
 }
@@ -515,7 +515,7 @@ func routeCommand(c *cli.Context) error {
 		return err
 	}
 
-	sc.logger.Infof(res.SuccessSummary())
+	sc.log.Info().Msg(res.SuccessSummary())
 	return nil
 }
 
@@ -4,14 +4,13 @@ import (
 	"context"
 	"fmt"
 	"strings"
-	"time"
 
 	"github.com/cloudflare/cloudflared/connection"
 	"github.com/cloudflare/cloudflared/ingress"
-	"github.com/cloudflare/cloudflared/logger"
 
 	"github.com/gdamore/tcell"
 	"github.com/rivo/tview"
+	"github.com/rs/zerolog"
 )
 
 type connState struct {
@@ -51,16 +50,16 @@ func NewUIModel(version, hostname, metricsURL string, ing *ingress.Ingress, haCo
 
 func (data *uiModel) LaunchUI(
 	ctx context.Context,
-	generalLogger, transportLogger logger.Service,
-	logLevels []logger.Level,
+	log, transportLog *zerolog.Logger,
 	tunnelEventChan <-chan connection.Event,
 ) {
 	// Configure the logger to stream logs into the textview
 
 	// Add TextView as a group to write output to
 	logTextView := NewDynamicColorTextView()
-	generalLogger.Add(logTextView, logger.NewUIFormatter(time.RFC3339), logLevels...)
-	transportLogger.Add(logTextView, logger.NewUIFormatter(time.RFC3339), logLevels...)
+	// TODO: Format log for UI
+	//log.Add(logTextView, logger.NewUIFormatter(time.RFC3339), logLevels...)
+	//transportLog.Add(logTextView, logger.NewUIFormatter(time.RFC3339), logLevels...)
 
 	// Construct the UI
 	palette := palette{
|
||||||
case connection.Connected:
|
case connection.Connected:
|
||||||
data.setConnTableCell(event, connTable, palette)
|
data.setConnTableCell(event, connTable, palette)
|
||||||
case connection.Disconnected, connection.Reconnecting:
|
case connection.Disconnected, connection.Reconnecting:
|
||||||
data.changeConnStatus(event, connTable, generalLogger, palette)
|
data.changeConnStatus(event, connTable, log, palette)
|
||||||
case connection.SetURL:
|
case connection.SetURL:
|
||||||
tunnelHostText.SetText(event.URL)
|
tunnelHostText.SetText(event.URL)
|
||||||
data.edgeURL = event.URL
|
data.edgeURL = event.URL
|
||||||
|
@ -141,7 +140,7 @@ func (data *uiModel) LaunchUI(
|
||||||
|
|
||||||
go func() {
|
go func() {
|
||||||
if err := app.SetRoot(frame, true).Run(); err != nil {
|
if err := app.SetRoot(frame, true).Run(); err != nil {
|
||||||
generalLogger.Errorf("Error launching UI: %s", err)
|
log.Error().Msgf("Error launching UI: %s", err)
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
}
|
}
|
||||||
|
@ -159,13 +158,13 @@ func handleNewText(app *tview.Application, logTextView *tview.TextView) func() {
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (data *uiModel) changeConnStatus(event connection.Event, table *tview.Table, logger logger.Service, palette palette) {
|
func (data *uiModel) changeConnStatus(event connection.Event, table *tview.Table, log *zerolog.Logger, palette palette) {
|
||||||
index := int(event.Index)
|
index := int(event.Index)
|
||||||
// Get connection location and state
|
// Get connection location and state
|
||||||
connState := data.getConnState(index)
|
connState := data.getConnState(index)
|
||||||
// Check if connection is already displayed in UI
|
// Check if connection is already displayed in UI
|
||||||
if connState == nil {
|
if connState == nil {
|
||||||
logger.Info("Connection is not in the UI table")
|
log.Info().Msg("Connection is not in the UI table")
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@@ -3,6 +3,7 @@ package updater
 import (
 	"context"
 	"fmt"
+	"github.com/rs/zerolog"
 	"os"
 	"path/filepath"
 	"runtime"
@@ -14,12 +15,10 @@ import (
 	"github.com/cloudflare/cloudflared/cmd/cloudflared/config"
 	"github.com/cloudflare/cloudflared/logger"
 	"github.com/facebookgo/grace/gracenet"
-	"github.com/pkg/errors"
 )
 
 const (
 	DefaultCheckUpdateFreq = time.Hour * 24
-	appID = "app_idCzgxYerVD"
 	noUpdateInShellMessage = "cloudflared will not automatically update when run from the shell. To enable auto-updates, run cloudflared as a service: https://developers.cloudflare.com/argo-tunnel/reference/service/"
 	noUpdateOnWindowsMessage = "cloudflared will not automatically update on Windows systems."
 	noUpdateManagedPackageMessage = "cloudflared will not automatically update if installed by a package manager."
@@ -114,38 +113,35 @@ func checkForUpdateAndApply(options updateOptions) UpdateOutcome {
 
 // Update is the handler for the update command from the command line
 func Update(c *cli.Context) error {
-	logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
-	if err != nil {
-		return errors.Wrap(err, "error setting up logger")
-	}
+	log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
 
 	if wasInstalledFromPackageManager() {
-		logger.Error("cloudflared was installed by a package manager. Please update using the same method.")
+		log.Error().Msg("cloudflared was installed by a package manager. Please update using the same method.")
 		return nil
 	}
 
 	isBeta := c.Bool("beta")
 	if isBeta {
-		logger.Info("cloudflared is set to update to the latest beta version")
+		log.Info().Msg("cloudflared is set to update to the latest beta version")
	}
 
 	isStaging := c.Bool("staging")
 	if isStaging {
-		logger.Info("cloudflared is set to update from staging")
+		log.Info().Msg("cloudflared is set to update from staging")
 	}
 
 	isForced := c.Bool("force")
 	if isForced {
-		logger.Info("cloudflared is set to upgrade to the latest publish version regardless of the current version")
+		log.Info().Msg("cloudflared is set to upgrade to the latest publish version regardless of the current version")
 	}
 
-	updateOutcome := loggedUpdate(logger, updateOptions{isBeta: isBeta, isStaging: isStaging, isForced: isForced, version: c.String("version")})
+	updateOutcome := loggedUpdate(log, updateOptions{isBeta: isBeta, isStaging: isStaging, isForced: isForced, version: c.String("version")})
 	if updateOutcome.Error != nil {
 		return &statusErr{updateOutcome.Error}
 	}
 
 	if updateOutcome.noUpdate() {
-		logger.Infof("cloudflared is up to date (%s)", updateOutcome.Version)
+		log.Info().Msgf("cloudflared is up to date (%s)", updateOutcome.Version)
 		return nil
 	}
 
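Several hunks in this commit drop an `if err != nil` block because the new logger constructor no longer returns an error: building a zerolog logger is plain value construction and cannot fail. A hedged sketch of what such a constructor can look like (`NewTerminalLogger` is a hypothetical name, not cloudflared's actual function):

package logger

import (
	"os"

	"github.com/rs/zerolog"
)

// NewTerminalLogger is hypothetical; it only illustrates why no error is returned:
// every step below is infallible value construction.
func NewTerminalLogger(level zerolog.Level) *zerolog.Logger {
	log := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).
		Level(level).
		With().
		Timestamp().
		Logger()
	return &log
}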
@@ -153,13 +149,13 @@ func Update(c *cli.Context) error {
 }
 
 // Checks for an update and applies it if one is available
-func loggedUpdate(logger logger.Service, options updateOptions) UpdateOutcome {
+func loggedUpdate(log *zerolog.Logger, options updateOptions) UpdateOutcome {
 	updateOutcome := checkForUpdateAndApply(options)
 	if updateOutcome.Updated {
-		logger.Infof("cloudflared has been updated to version %s", updateOutcome.Version)
+		log.Info().Msgf("cloudflared has been updated to version %s", updateOutcome.Version)
 	}
 	if updateOutcome.Error != nil {
-		logger.Errorf("update check failed: %s", updateOutcome.Error)
+		log.Error().Msgf("update check failed: %s", updateOutcome.Error)
 	}
 
 	return updateOutcome
@@ -170,7 +166,7 @@ type AutoUpdater struct {
 	configurable *configurable
 	listeners *gracenet.Net
 	updateConfigChan chan *configurable
-	logger logger.Service
+	log *zerolog.Logger
 }
 
 // AutoUpdaterConfigurable is the attributes of AutoUpdater that can be reconfigured during runtime
@@ -179,7 +175,7 @@ type configurable struct {
 	freq time.Duration
 }
 
-func NewAutoUpdater(freq time.Duration, listeners *gracenet.Net, logger logger.Service) *AutoUpdater {
+func NewAutoUpdater(freq time.Duration, listeners *gracenet.Net, log *zerolog.Logger) *AutoUpdater {
 	updaterConfigurable := &configurable{
 		enabled: true,
 		freq: freq,
@@ -192,7 +188,7 @@ func NewAutoUpdater(freq time.Duration, listeners *gracenet.Net, logger logger.S
 		configurable: updaterConfigurable,
 		listeners: listeners,
 		updateConfigChan: make(chan *configurable),
-		logger: logger,
+		log: log,
 	}
 }
 
@@ -200,19 +196,19 @@ func (a *AutoUpdater) Run(ctx context.Context) error {
 	ticker := time.NewTicker(a.configurable.freq)
 	for {
 		if a.configurable.enabled {
-			updateOutcome := loggedUpdate(a.logger, updateOptions{})
+			updateOutcome := loggedUpdate(a.log, updateOptions{})
 			if updateOutcome.Updated {
 				if IsSysV() {
 					// SysV doesn't have a mechanism to keep service alive, we have to restart the process
-					a.logger.Info("Restarting service managed by SysV...")
+					a.log.Info().Msg("Restarting service managed by SysV...")
 					pid, err := a.listeners.StartProcess()
 					if err != nil {
-						a.logger.Errorf("Unable to restart server automatically: %s", err)
+						a.log.Error().Msgf("Unable to restart server automatically: %s", err)
 						return &statusErr{err: err}
 					}
 					// stop old process after autoupdate. Otherwise we create a new process
 					// after each update
-					a.logger.Infof("PID of the new process is %d", pid)
+					a.log.Info().Msgf("PID of the new process is %d", pid)
 				}
 				return &statusSuccess{newVersion: updateOutcome.Version}
 			}
@@ -244,26 +240,26 @@ func (a *AutoUpdater) Update(newFreq time.Duration) {
 	a.updateConfigChan <- newConfigurable
 }
 
-func IsAutoupdateEnabled(c *cli.Context, l logger.Service) bool {
-	if !SupportAutoUpdate(l) {
+func IsAutoupdateEnabled(c *cli.Context, log *zerolog.Logger) bool {
+	if !SupportAutoUpdate(log) {
 		return false
 	}
 	return !c.Bool("no-autoupdate") && c.Duration("autoupdate-freq") != 0
 }
 
-func SupportAutoUpdate(logger logger.Service) bool {
+func SupportAutoUpdate(log *zerolog.Logger) bool {
 	if runtime.GOOS == "windows" {
-		logger.Info(noUpdateOnWindowsMessage)
+		log.Info().Msg(noUpdateOnWindowsMessage)
 		return false
 	}
 
 	if wasInstalledFromPackageManager() {
-		logger.Info(noUpdateManagedPackageMessage)
+		log.Info().Msg(noUpdateManagedPackageMessage)
 		return false
 	}
 
 	if isRunningFromTerminal() {
-		logger.Info(noUpdateInShellMessage)
+		log.Info().Msg(noUpdateInShellMessage)
 		return false
 	}
 	return true
@@ -4,15 +4,15 @@ import (
 	"context"
 	"testing"
 
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/facebookgo/grace/gracenet"
+	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/assert"
 )
 
 func TestDisabledAutoUpdater(t *testing.T) {
 	listeners := &gracenet.Net{}
-	logger := logger.NewOutputWriter(logger.NewMockWriteManager())
-	autoupdater := NewAutoUpdater(0, listeners, logger)
+	log := zerolog.Nop()
+	autoupdater := NewAutoUpdater(0, listeners, &log)
 	ctx, cancel := context.WithCancel(context.Background())
 	errC := make(chan error)
 	go func() {
@ -13,9 +13,8 @@ import (
|
||||||
"unsafe"
|
"unsafe"
|
||||||
|
|
||||||
"github.com/cloudflare/cloudflared/logger"
|
"github.com/cloudflare/cloudflared/logger"
|
||||||
"github.com/pkg/errors"
|
|
||||||
cli "github.com/urfave/cli/v2"
|
|
||||||
|
|
||||||
|
"github.com/urfave/cli/v2"
|
||||||
"golang.org/x/sys/windows"
|
"golang.org/x/sys/windows"
|
||||||
"golang.org/x/sys/windows/svc"
|
"golang.org/x/sys/windows/svc"
|
||||||
"golang.org/x/sys/windows/svc/eventlog"
|
"golang.org/x/sys/windows/svc/eventlog"
|
||||||
|
@ -67,15 +66,11 @@ func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) {
|
||||||
// 2. get ERROR_FAILED_SERVICE_CONTROLLER_CONNECT
|
// 2. get ERROR_FAILED_SERVICE_CONTROLLER_CONNECT
|
||||||
// This involves actually trying to start the service.
|
// This involves actually trying to start the service.
|
||||||
|
|
||||||
logger, err := logger.New()
|
log := logger.Create(nil)
|
||||||
if err != nil {
|
|
||||||
os.Exit(1)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
isIntSess, err := svc.IsAnInteractiveSession()
|
isIntSess, err := svc.IsAnInteractiveSession()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Fatalf("failed to determine if we are running in an interactive session: %v", err)
|
log.Fatal().Msgf("failed to determine if we are running in an interactive session: %v", err)
|
||||||
}
|
}
|
||||||
if isIntSess {
|
if isIntSess {
|
||||||
app.Run(os.Args)
|
app.Run(os.Args)
|
||||||
|
@ -93,7 +88,7 @@ func runApp(app *cli.App, shutdownC, graceShutdownC chan struct{}) {
|
||||||
app.Run(os.Args)
|
app.Run(os.Args)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
logger.Fatalf("%s service failed: %v", windowsServiceName, err)
|
log.Fatal().Msgf("%s service failed: %v", windowsServiceName, err)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -105,15 +100,10 @@ type windowsService struct {
|
||||||
|
|
||||||
// called by the package code at the start of the service
|
// called by the package code at the start of the service
|
||||||
func (s *windowsService) Execute(serviceArgs []string, r <-chan svc.ChangeRequest, statusChan chan<- svc.Status) (ssec bool, errno uint32) {
|
func (s *windowsService) Execute(serviceArgs []string, r <-chan svc.ChangeRequest, statusChan chan<- svc.Status) (ssec bool, errno uint32) {
|
||||||
logger, err := logger.New()
|
log := logger.Create(nil)
|
||||||
if err != nil {
|
|
||||||
os.Exit(1)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
elog, err := eventlog.Open(windowsServiceName)
|
elog, err := eventlog.Open(windowsServiceName)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logger.Errorf("Cannot open event log for %s with error: %s", windowsServiceName, err)
|
log.Error().Msgf("Cannot open event log for %s with error: %s", windowsServiceName, err)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
defer elog.Close()
|
defer elog.Close()
|
||||||
|
@ -173,79 +163,73 @@ func (s *windowsService) Execute(serviceArgs []string, r <-chan svc.ChangeReques
|
||||||
}
|
}
|
||||||
|
|
||||||
func installWindowsService(c *cli.Context) error {
|
func installWindowsService(c *cli.Context) error {
|
||||||
logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
|
log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
|
||||||
if err != nil {
|
|
||||||
return errors.Wrap(err, "error setting up logger")
|
|
||||||
}
|
|
||||||
|
|
-logger.Infof("Installing Argo Tunnel Windows service")
+log.Info().Msgf("Installing Argo Tunnel Windows service")
 exepath, err := os.Executable()
 if err != nil {
-logger.Errorf("Cannot find path name that start the process")
+log.Error().Msgf("Cannot find path name that start the process")
 return err
 }
 m, err := mgr.Connect()
 if err != nil {
-logger.Errorf("Cannot establish a connection to the service control manager: %s", err)
+log.Error().Msgf("Cannot establish a connection to the service control manager: %s", err)
 return err
 }
 defer m.Disconnect()
 s, err := m.OpenService(windowsServiceName)
 if err == nil {
 s.Close()
-logger.Errorf("service %s already exists", windowsServiceName)
+log.Error().Msgf("service %s already exists", windowsServiceName)
 return fmt.Errorf("service %s already exists", windowsServiceName)
 }
 config := mgr.Config{StartType: mgr.StartAutomatic, DisplayName: windowsServiceDescription}
 s, err = m.CreateService(windowsServiceName, exepath, config)
 if err != nil {
-logger.Errorf("Cannot install service %s", windowsServiceName)
+log.Error().Msgf("Cannot install service %s", windowsServiceName)
 return err
 }
 defer s.Close()
-logger.Infof("Argo Tunnel agent service is installed")
+log.Info().Msgf("Argo Tunnel agent service is installed")
 err = eventlog.InstallAsEventCreate(windowsServiceName, eventlog.Error|eventlog.Warning|eventlog.Info)
 if err != nil {
 s.Delete()
-logger.Errorf("Cannot install event logger: %s", err)
+log.Error().Msgf("Cannot install event logger: %s", err)
 return fmt.Errorf("SetupEventLogSource() failed: %s", err)
 }
 err = configRecoveryOption(s.Handle)
 if err != nil {
-logger.Errorf("Cannot set service recovery actions: %s", err)
-logger.Infof("See %s to manually configure service recovery actions", windowsServiceUrl)
+log.Error().Msgf("Cannot set service recovery actions: %s", err)
+log.Info().Msgf("See %s to manually configure service recovery actions", windowsServiceUrl)
 }
 return nil
 }

 func uninstallWindowsService(c *cli.Context) error {
-logger, err := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
-if err != nil {
-return errors.Wrap(err, "error setting up logger")
-}
+log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)

-logger.Infof("Uninstalling Argo Tunnel Windows Service")
+log.Info().Msgf("Uninstalling Argo Tunnel Windows Service")
 m, err := mgr.Connect()
 if err != nil {
-logger.Errorf("Cannot establish a connection to the service control manager")
+log.Error().Msgf("Cannot establish a connection to the service control manager")
 return err
 }
 defer m.Disconnect()
 s, err := m.OpenService(windowsServiceName)
 if err != nil {
-logger.Errorf("service %s is not installed", windowsServiceName)
+log.Error().Msgf("service %s is not installed", windowsServiceName)
 return fmt.Errorf("service %s is not installed", windowsServiceName)
 }
 defer s.Close()
 err = s.Delete()
 if err != nil {
-logger.Errorf("Cannot delete service %s", windowsServiceName)
+log.Error().Msgf("Cannot delete service %s", windowsServiceName)
 return err
 }
-logger.Infof("Argo Tunnel agent service is uninstalled")
+log.Info().Msgf("Argo Tunnel agent service is uninstalled")
 err = eventlog.Remove(windowsServiceName)
 if err != nil {
-logger.Errorf("Cannot remove event logger")
+log.Error().Msgf("Cannot remove event logger")
 return fmt.Errorf("RemoveEventLogSource() failed: %s", err)
 }
 return nil
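The pattern repeated throughout this commit is mechanical: printf-style calls on the old logger interface become zerolog's chained, leveled events. A minimal, self-contained sketch of the new call style follows; the console writer, timestamp field, and the message arguments are illustrative only, not taken from the diff.

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// ConsoleWriter keeps output human readable; zerolog emits structured
	// JSON lines when given a plain io.Writer instead.
	log := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Logger()

	// Old style: logger.Infof("Installing %s service", name)
	// New style: create an event at a level, then emit it with Msgf.
	log.Info().Msgf("Installing %s Windows service", "example-agent")
	log.Error().Msgf("Cannot establish a connection to the service control manager: %s", "access denied")
}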
@@ -8,8 +8,8 @@ import (
 "testing"
 "time"

-"github.com/cloudflare/cloudflared/logger"
 "github.com/gobwas/ws/wsutil"
+"github.com/rs/zerolog"
 "github.com/stretchr/testify/assert"
 )

@@ -22,14 +22,14 @@ var (
 OriginClient: &mockOriginClient{},
 GracePeriod: time.Millisecond * 100,
 }
-testLogger, _ = logger.New()
+log = zerolog.Nop()
 testOriginURL = &url.URL{
 Scheme: "https",
 Host: "connectiontest.argotunnel.com",
 }
 testTunnelEventChan = make(chan Event)
 testObserver = &Observer{
-testLogger,
+&log,
 m,
 []chan Event{testTunnelEventChan},
 false,
@@ -81,7 +81,7 @@ func wsEndpoint(w ResponseWriter, r *http.Request) error {
 resp := &http.Response{
 StatusCode: http.StatusSwitchingProtocols,
 }
-w.WriteRespHeaders(resp)
+_ = w.WriteRespHeaders(resp)
 clientReader := nowriter{r.Body}
 go func() {
 for {
@@ -102,8 +102,8 @@ func originRespEndpoint(w ResponseWriter, status int, data []byte) {
 resp := &http.Response{
 StatusCode: status,
 }
-w.WriteRespHeaders(resp)
-w.Write(data)
+_ = w.WriteRespHeaders(resp)
+_, _ = w.Write(data)
 }

 type mockConnectedFuse struct{}
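In the test files, the old mock write manager is replaced by zerolog.Nop(), a logger that discards everything it is given. Roughly, under the assumption of a test that only needs a *zerolog.Logger to satisfy the new signatures, the pattern looks like this; needsLogger is a placeholder function, not part of the cloudflared codebase.

package example

import (
	"testing"

	"github.com/rs/zerolog"
)

// needsLogger stands in for any function under test that now takes *zerolog.Logger.
func needsLogger(log *zerolog.Logger) {
	log.Info().Msg("this is silently discarded")
}

func TestWithNopLogger(t *testing.T) {
	log := zerolog.Nop() // produces no output at all
	needsLogger(&log)    // pass a pointer, matching the new signatures in this diff
}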
@@ -65,11 +65,11 @@ func (e muxerShutdownError) Error() string {
 func isHandshakeErrRecoverable(err error, connIndex uint8, observer *Observer) bool {
 switch err.(type) {
 case edgediscovery.DialError:
-observer.Errorf("Connection %d unable to dial edge: %s", connIndex, err)
+observer.log.Error().Msgf("Connection %d unable to dial edge: %s", connIndex, err)
 case h2mux.MuxerHandshakeError:
-observer.Errorf("Connection %d handshake with edge server failed: %s", connIndex, err)
+observer.log.Error().Msgf("Connection %d handshake with edge server failed: %s", connIndex, err)
 default:
-observer.Errorf("Connection %d failed: %s", connIndex, err)
+observer.log.Error().Msgf("Connection %d failed: %s", connIndex, err)
 return false
 }
 return true
@@ -7,10 +7,11 @@ import (
 "time"

 "github.com/cloudflare/cloudflared/h2mux"
-"github.com/cloudflare/cloudflared/logger"
 tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 "github.com/cloudflare/cloudflared/websocket"

 "github.com/pkg/errors"
+"github.com/rs/zerolog"
 "golang.org/x/sync/errgroup"
 )

@@ -37,14 +38,14 @@ type MuxerConfig struct {
 MetricsUpdateFreq time.Duration
 }

-func (mc *MuxerConfig) H2MuxerConfig(h h2mux.MuxedStreamHandler, logger logger.Service) *h2mux.MuxerConfig {
+func (mc *MuxerConfig) H2MuxerConfig(h h2mux.MuxedStreamHandler, log *zerolog.Logger) *h2mux.MuxerConfig {
 return &h2mux.MuxerConfig{
 Timeout: muxerTimeout,
 Handler: h,
 IsClient: true,
 HeartbeatInterval: mc.HeartbeatInterval,
 MaxHeartbeats: mc.MaxHeartbeats,
-Logger: logger,
+Log: log,
 CompressionQuality: mc.CompressionSetting,
 }
 }
@@ -67,7 +68,7 @@ func NewH2muxConnection(ctx context.Context,

 // Establish a muxed connection with the edge
 // Client mux handshake with agent server
-muxer, err := h2mux.Handshake(edgeConn, edgeConn, *muxerConfig.H2MuxerConfig(h, observer), h2mux.ActiveStreams)
+muxer, err := h2mux.Handshake(edgeConn, edgeConn, *muxerConfig.H2MuxerConfig(h, observer.log), h2mux.ActiveStreams)
 if err != nil {
 recoverable := isHandshakeErrRecoverable(err, connIndex, observer)
 return nil, err, recoverable
@@ -87,7 +88,7 @@ func (h *h2muxConnection) ServeNamedTunnel(ctx context.Context, namedTunnel *Nam
 if err != nil {
 return err
 }
-rpcClient := newRegistrationRPCClient(ctx, stream, h.observer)
+rpcClient := newRegistrationRPCClient(ctx, stream, h.observer.log)
 defer rpcClient.Close()

 if err = rpcClient.RegisterConnection(serveCtx, namedTunnel, connOptions, h.connIndex, h.observer); err != nil {
@@ -122,7 +123,7 @@ func (h *h2muxConnection) ServeClassicTunnel(ctx context.Context, classicTunnel
 return nil
 }
 // log errors and proceed to RegisterTunnel
-h.observer.Errorf("Couldn't reconnect connection %d. Reregistering it instead. Error was: %v", h.connIndex, err)
+h.observer.log.Error().Msgf("Couldn't reconnect connection %d. Reregistering it instead. Error was: %v", h.connIndex, err)
 }
 return h.registerTunnel(ctx, credentialManager, classicTunnel, registrationOptions)
 })
@@ -212,9 +213,9 @@ func (rp *h2muxRespWriter) WriteRespHeaders(resp *http.Response) error {
 }

 func (rp *h2muxRespWriter) WriteErrorResponse() {
-rp.WriteHeaders([]h2mux.Header{
+_ = rp.WriteHeaders([]h2mux.Header{
 {Name: ":status", Value: "502"},
 {Name: ResponseMetaHeaderField, Value: responseMetaHeaderCfd},
 })
-rp.Write([]byte("502 Bad Gateway"))
+_, _ = rp.Write([]byte("502 Bad Gateway"))
 }

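Alongside the logger swap, the commit silences unused-return warnings by explicitly discarding errors with the blank identifier. A small illustration of the idiom, assuming a best-effort response writer; the helper name is hypothetical.

package example

import "io"

// writeBadGateway mirrors the WriteErrorResponse pattern above: the write is
// best-effort, so the byte count and error are deliberately and visibly dropped.
func writeBadGateway(w io.Writer) {
	_, _ = w.Write([]byte("502 Bad Gateway")) // ignore bytes written and error
}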
@@ -31,7 +31,7 @@ func newH2MuxConnection(ctx context.Context, t require.TestingT) (*h2muxConnecti
 edgeMuxChan := make(chan *h2mux.Muxer)
 go func() {
 edgeMuxConfig := h2mux.MuxerConfig{
-Logger: testObserver,
+Log: testObserver.log,
 }
 edgeMux, err := h2mux.Handshake(edgeConn, edgeConn, edgeMuxConfig, h2mux.ActiveStreams)
 require.NoError(t, err)
@@ -85,7 +85,7 @@ func TestServeStreamHTTP(t *testing.T) {
 wg.Add(2)
 go func() {
 defer wg.Done()
-edgeMux.Serve(ctx)
+_ = edgeMux.Serve(ctx)
 }()
 go func() {
 defer wg.Done()
@@ -11,9 +11,9 @@ import (
 "sync"

 "github.com/cloudflare/cloudflared/h2mux"
-"github.com/cloudflare/cloudflared/logger"
 tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"

+"github.com/rs/zerolog"
 "golang.org/x/net/http2"
 )

@@ -38,7 +38,7 @@ type http2Connection struct {
 connIndex uint8
 wg *sync.WaitGroup
 // newRPCClientFunc allows us to mock RPCs during testing
-newRPCClientFunc func(context.Context, io.ReadWriteCloser, logger.Service) NamedTunnelRPCClient
+newRPCClientFunc func(context.Context, io.ReadWriteCloser, *zerolog.Logger) NamedTunnelRPCClient
 connectedFuse ConnectedFuse
 }

@@ -89,7 +89,7 @@ func (c *http2Connection) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 }
 flusher, isFlusher := w.(http.Flusher)
 if !isFlusher {
-c.observer.Errorf("%T doesn't implement http.Flusher", w)
+c.observer.log.Error().Msgf("%T doesn't implement http.Flusher", w)
 respWriter.WriteErrorResponse()
 return
 }
@@ -112,7 +112,7 @@ func (c *http2Connection) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 }

 func (c *http2Connection) serveControlStream(ctx context.Context, respWriter *http2RespWriter) error {
-rpcClient := c.newRPCClientFunc(ctx, respWriter, c.observer)
+rpcClient := c.newRPCClientFunc(ctx, respWriter, c.observer.log)
 defer rpcClient.Close()

 if err := rpcClient.RegisterConnection(ctx, c.namedTunnel, c.connOptions, c.connIndex, c.observer); err != nil {
@@ -12,10 +12,11 @@ import (
 "testing"
 "time"

-"github.com/cloudflare/cloudflared/logger"
 "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"

 "github.com/gobwas/ws/wsutil"
+"github.com/rs/zerolog"
 "github.com/stretchr/testify/require"
 "golang.org/x/net/http2"
 )
@@ -136,7 +137,7 @@ type mockRPCClientFactory struct {
 unregistered chan struct{}
 }

-func (mf *mockRPCClientFactory) newMockRPCClient(context.Context, io.ReadWriteCloser, logger.Service) NamedTunnelRPCClient {
+func (mf *mockRPCClientFactory) newMockRPCClient(context.Context, io.ReadWriteCloser, *zerolog.Logger) NamedTunnelRPCClient {
 return mockNamedTunnelRPCClient{
 registered: mf.registered,
 unregistered: mf.unregistered,
@@ -5,20 +5,21 @@ import (
 "net/url"
 "strings"

-"github.com/cloudflare/cloudflared/logger"
 tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"

+"github.com/rs/zerolog"
 )

 type Observer struct {
-logger.Service
+log *zerolog.Logger
 metrics *tunnelMetrics
 tunnelEventChans []chan Event
 uiEnabled bool
 }

-func NewObserver(logger logger.Service, tunnelEventChans []chan Event, uiEnabled bool) *Observer {
+func NewObserver(log *zerolog.Logger, tunnelEventChans []chan Event, uiEnabled bool) *Observer {
 return &Observer{
-logger,
+log,
 newTunnelMetrics(),
 tunnelEventChans,
 uiEnabled,
@@ -27,7 +28,7 @@ func NewObserver(logger logger.Service, tunnelEventChans []chan Event, uiEnabled

 func (o *Observer) logServerInfo(connIndex uint8, location, msg string) {
 o.sendEvent(Event{Index: connIndex, EventType: Connected, Location: location})
-o.Infof(msg)
+o.log.Info().Msgf(msg)
 o.metrics.registerServerLocation(uint8ToString(connIndex), location)
 }

@@ -36,10 +37,10 @@ func (o *Observer) logTrialHostname(registration *tunnelpogs.TunnelRegistration)
 if !o.uiEnabled {
 if registrationURL, err := url.Parse(registration.Url); err == nil {
 for _, line := range asciiBox(trialZoneMsg(registrationURL.String()), 2) {
-o.Info(line)
+o.log.Info().Msg(line)
 }
 } else {
-o.Error("Failed to connect tunnel, please try again.")
+o.log.Error().Msg("Failed to connect tunnel, please try again.")
 return fmt.Errorf("empty URL in response from Cloudflare edge")
 }
 }

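The Observer change above is the structural core of the migration: instead of embedding the old logger interface (which exposed Infof and Errorf directly on the struct), the logger becomes a named *zerolog.Logger field, so call sites go through o.log explicitly. A simplified sketch of that shape, with the metrics and event channels omitted for brevity:

package example

import "github.com/rs/zerolog"

// Observer here is a reduced stand-in for the struct in the diff; the real
// one also carries tunnel metrics, event channels, and a UI flag.
type Observer struct {
	log *zerolog.Logger
}

func (o *Observer) logConnected(location string) {
	// before: o.Infof("Connected to %s", location)  (via the embedded interface)
	o.log.Info().Msgf("Connected to %s", location)
}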
@@ -6,7 +6,7 @@ import (
 "sync"
 "time"

-"github.com/cloudflare/cloudflared/logger"
+"github.com/rs/zerolog"
 )

 const (
@@ -89,7 +89,7 @@ type autoProtocolSelector struct {
 fetchFunc PercentageFetcher
 refreshAfter time.Time
 ttl time.Duration
-logger logger.Service
+log *zerolog.Logger
 }

 func newAutoProtocolSelector(
@@ -97,7 +97,7 @@ func newAutoProtocolSelector(
 switchThrehold int32,
 fetchFunc PercentageFetcher,
 ttl time.Duration,
-logger logger.Service,
+log *zerolog.Logger,
 ) *autoProtocolSelector {
 return &autoProtocolSelector{
 current: current,
@@ -105,7 +105,7 @@ func newAutoProtocolSelector(
 fetchFunc: fetchFunc,
 refreshAfter: time.Now().Add(ttl),
 ttl: ttl,
-logger: logger,
+log: log,
 }
 }

@@ -118,7 +118,7 @@ func (s *autoProtocolSelector) Current() Protocol {

 percentage, err := s.fetchFunc()
 if err != nil {
-s.logger.Errorf("Failed to refresh protocol, err: %v", err)
+s.log.Error().Msgf("Failed to refresh protocol, err: %v", err)
 return s.current
 }

@@ -139,7 +139,13 @@ func (s *autoProtocolSelector) Fallback() (Protocol, bool) {

 type PercentageFetcher func() (int32, error)

-func NewProtocolSelector(protocolFlag string, namedTunnel *NamedTunnelConfig, fetchFunc PercentageFetcher, ttl time.Duration, logger logger.Service) (ProtocolSelector, error) {
+func NewProtocolSelector(
+protocolFlag string,
+namedTunnel *NamedTunnelConfig,
+fetchFunc PercentageFetcher,
+ttl time.Duration,
+log *zerolog.Logger,
+) (ProtocolSelector, error) {
 if namedTunnel == nil {
 return &staticProtocolSelector{
 current: H2mux,
@@ -157,9 +163,9 @@ func NewProtocolSelector(protocolFlag string, namedTunnel *NamedTunnelConfig, fe
 }
 if protocolFlag == HTTP2.String() {
 if http2Percentage < 0 {
-return newAutoProtocolSelector(H2mux, explicitHTTP2FallbackThreshold, fetchFunc, ttl, logger), nil
+return newAutoProtocolSelector(H2mux, explicitHTTP2FallbackThreshold, fetchFunc, ttl, log), nil
 }
-return newAutoProtocolSelector(HTTP2, explicitHTTP2FallbackThreshold, fetchFunc, ttl, logger), nil
+return newAutoProtocolSelector(HTTP2, explicitHTTP2FallbackThreshold, fetchFunc, ttl, log), nil
 }

 if protocolFlag != autoSelectFlag {
@@ -167,13 +173,13 @@ func NewProtocolSelector(protocolFlag string, namedTunnel *NamedTunnelConfig, fe
 }
 threshold := switchThreshold(namedTunnel.Credentials.AccountTag)
 if threshold < http2Percentage {
-return newAutoProtocolSelector(HTTP2, threshold, fetchFunc, ttl, logger), nil
+return newAutoProtocolSelector(HTTP2, threshold, fetchFunc, ttl, log), nil
 }
-return newAutoProtocolSelector(H2mux, threshold, fetchFunc, ttl, logger), nil
+return newAutoProtocolSelector(H2mux, threshold, fetchFunc, ttl, log), nil
 }

 func switchThreshold(accountTag string) int32 {
 h := fnv.New32a()
-h.Write([]byte(accountTag))
+_, _ = h.Write([]byte(accountTag))
 return int32(h.Sum32() % 100)
 }

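The protocol selector above steers an account to HTTP/2 when its switchThreshold value falls below the fetched http2Percentage; the threshold itself is a stable FNV-1a hash of the account tag mapped into 0-99. A standalone sketch of that same computation, using a made-up account tag purely for illustration:

package main

import (
	"fmt"
	"hash/fnv"
)

// switchThreshold mirrors the logic in the selector: hash the account tag
// with FNV-1a and map it into a stable 0-99 bucket.
func switchThreshold(accountTag string) int32 {
	h := fnv.New32a()
	_, _ = h.Write([]byte(accountTag))
	return int32(h.Sum32() % 100)
}

func main() {
	// "example-account-tag" is a hypothetical value, not a real account.
	threshold := switchThreshold("example-account-tag")
	// Below the fetched http2Percentage -> HTTP/2; otherwise stay on h2mux.
	fmt.Println(threshold)
}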
@@ -5,7 +5,6 @@ import (
 "testing"
 "time"

-"github.com/cloudflare/cloudflared/logger"
 "github.com/stretchr/testify/assert"
 )

@@ -130,9 +129,9 @@ func TestNewProtocolSelector(t *testing.T) {
 wantErr: true,
 },
 }
-logger, _ := logger.New()
 for _, test := range tests {
-selector, err := NewProtocolSelector(test.protocol, test.namedTunnelConfig, test.fetchFunc, testNoTTL, logger)
+selector, err := NewProtocolSelector(test.protocol, test.namedTunnelConfig, test.fetchFunc, testNoTTL, &log)
 if test.wantErr {
 assert.Error(t, err, fmt.Sprintf("test %s failed", test.name))
 } else {
@@ -148,9 +147,8 @@ func TestNewProtocolSelector(t *testing.T) {
 }

 func TestAutoProtocolSelectorRefresh(t *testing.T) {
-logger, _ := logger.New()
 fetcher := dynamicMockFetcher{}
-selector, err := NewProtocolSelector("auto", testNamedTunnelConfig, fetcher.fetch(), testNoTTL, logger)
+selector, err := NewProtocolSelector("auto", testNamedTunnelConfig, fetcher.fetch(), testNoTTL, &log)
 assert.NoError(t, err)
 assert.Equal(t, H2mux, selector.Current())

@@ -178,9 +176,8 @@ func TestAutoProtocolSelectorRefresh(t *testing.T) {
 }

 func TestHTTP2ProtocolSelectorRefresh(t *testing.T) {
-logger, _ := logger.New()
 fetcher := dynamicMockFetcher{}
-selector, err := NewProtocolSelector("http2", testNamedTunnelConfig, fetcher.fetch(), testNoTTL, logger)
+selector, err := NewProtocolSelector("http2", testNamedTunnelConfig, fetcher.fetch(), testNoTTL, &log)
 assert.NoError(t, err)
 assert.Equal(t, HTTP2, selector.Current())

@@ -208,9 +205,8 @@ func TestHTTP2ProtocolSelectorRefresh(t *testing.T) {
 }

 func TestProtocolSelectorRefreshTTL(t *testing.T) {
-logger, _ := logger.New()
 fetcher := dynamicMockFetcher{percentage: 100}
-selector, err := NewProtocolSelector("auto", testNamedTunnelConfig, fetcher.fetch(), time.Hour, logger)
+selector, err := NewProtocolSelector("auto", testNamedTunnelConfig, fetcher.fetch(), time.Hour, &log)
 assert.NoError(t, err)
 assert.Equal(t, HTTP2, selector.Current())

@@ -6,9 +6,10 @@ import (
 "io"
 "time"

-"github.com/cloudflare/cloudflared/logger"
 "github.com/cloudflare/cloudflared/tunnelrpc"
 tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"

+"github.com/rs/zerolog"
 "zombiezen.com/go/capnproto2/rpc"
 )

@@ -22,12 +23,12 @@ type tunnelServerClient struct {
 func NewTunnelServerClient(
 ctx context.Context,
 stream io.ReadWriteCloser,
-logger logger.Service,
+log *zerolog.Logger,
 ) *tunnelServerClient {
-transport := tunnelrpc.NewTransportLogger(logger, rpc.StreamTransport(stream))
+transport := tunnelrpc.NewTransportLogger(log, rpc.StreamTransport(stream))
 conn := rpc.NewConn(
 transport,
-tunnelrpc.ConnLog(logger),
+tunnelrpc.ConnLog(log),
 )
 registrationClient := tunnelpogs.RegistrationServer_PogsClient{Client: conn.Bootstrap(ctx), Conn: conn}
 return &tunnelServerClient{
@@ -46,8 +47,8 @@ func (tsc *tunnelServerClient) Authenticate(ctx context.Context, classicTunnel *

 func (tsc *tunnelServerClient) Close() {
 // Closing the client will also close the connection
-tsc.client.Close()
-tsc.transport.Close()
+_ = tsc.client.Close()
+_ = tsc.transport.Close()
 }

 type NamedTunnelRPCClient interface {
@@ -70,12 +71,12 @@ type registrationServerClient struct {
 func newRegistrationRPCClient(
 ctx context.Context,
 stream io.ReadWriteCloser,
-logger logger.Service,
+log *zerolog.Logger,
 ) NamedTunnelRPCClient {
-transport := tunnelrpc.NewTransportLogger(logger, rpc.StreamTransport(stream))
+transport := tunnelrpc.NewTransportLogger(log, rpc.StreamTransport(stream))
 conn := rpc.NewConn(
 transport,
-tunnelrpc.ConnLog(logger),
+tunnelrpc.ConnLog(log),
 )
 return &registrationServerClient{
 client: tunnelpogs.RegistrationServer_PogsClient{Client: conn.Bootstrap(ctx), Conn: conn},
@@ -117,14 +118,14 @@ func (rsc *registrationServerClient) RegisterConnection(
 func (rsc *registrationServerClient) GracefulShutdown(ctx context.Context, gracePeriod time.Duration) {
 ctx, cancel := context.WithTimeout(ctx, gracePeriod)
 defer cancel()
-rsc.client.UnregisterConnection(ctx)
+_ = rsc.client.UnregisterConnection(ctx)
 }

 func (rsc *registrationServerClient) Close() {
 // Closing the client will also close the connection
-rsc.client.Close()
+_ = rsc.client.Close()
 // Closing the transport also closes the stream
-rsc.transport.Close()
+_ = rsc.transport.Close()
 }

 type rpcName string
@@ -143,10 +144,10 @@ func (h *h2muxConnection) registerTunnel(ctx context.Context, credentialSetter C
 if err != nil {
 return err
 }
-rpcClient := NewTunnelServerClient(ctx, stream, h.observer)
+rpcClient := NewTunnelServerClient(ctx, stream, h.observer.log)
 defer rpcClient.Close()

-h.logServerInfo(ctx, rpcClient)
+_ = h.logServerInfo(ctx, rpcClient)
 registration := rpcClient.client.RegisterTunnel(
 ctx,
 classicTunnel.OriginCert,
@@ -178,12 +179,12 @@ func (h *h2muxConnection) processRegistrationSuccess(
 credentialManager CredentialManager, classicTunnel *ClassicTunnelConfig,
 ) error {
 for _, logLine := range registration.LogLines {
-h.observer.Info(logLine)
+h.observer.log.Info().Msg(logLine)
 }

 if registration.TunnelID != "" {
 h.observer.metrics.tunnelsHA.AddTunnelID(h.connIndex, registration.TunnelID)
-h.observer.Infof("Each HA connection's tunnel IDs: %v", h.observer.metrics.tunnelsHA.String())
+h.observer.log.Info().Msgf("Each HA connection's tunnel IDs: %v", h.observer.metrics.tunnelsHA.String())
 }

 // Print out the user's trial zone URL in a nice box (if they requested and got one and UI flag is not set)
@@ -197,7 +198,7 @@ func (h *h2muxConnection) processRegistrationSuccess(
 credentialManager.SetConnDigest(h.connIndex, registration.ConnDigest)
 h.observer.metrics.userHostnamesCounts.WithLabelValues(registration.Url).Inc()

-h.observer.Infof("Route propagating, it may take up to 1 minute for your new route to become functional")
+h.observer.log.Info().Msgf("Route propagating, it may take up to 1 minute for your new route to become functional")
 h.observer.metrics.regSuccess.WithLabelValues(string(name)).Inc()
 return nil
 }
@@ -228,15 +229,15 @@ func (h *h2muxConnection) reconnectTunnel(ctx context.Context, credentialManager
 return err
 }

-h.observer.Debug("initiating RPC stream to reconnect")
+h.observer.log.Debug().Msg("initiating RPC stream to reconnect")
 stream, err := h.newRPCStream(ctx, register)
 if err != nil {
 return err
 }
-rpcClient := NewTunnelServerClient(ctx, stream, h.observer)
+rpcClient := NewTunnelServerClient(ctx, stream, h.observer.log)
 defer rpcClient.Close()

-h.logServerInfo(ctx, rpcClient)
+_ = h.logServerInfo(ctx, rpcClient)
 registration := rpcClient.client.ReconnectTunnel(
 ctx,
 token,
@@ -259,15 +260,15 @@ func (h *h2muxConnection) logServerInfo(ctx context.Context, rpcClient *tunnelSe
 })
 serverInfoMessage, err := serverInfoPromise.Result().Struct()
 if err != nil {
-h.observer.Errorf("Failed to retrieve server information: %s", err)
+h.observer.log.Error().Msgf("Failed to retrieve server information: %s", err)
 return err
 }
 serverInfo, err := tunnelpogs.UnmarshalServerInfo(serverInfoMessage)
 if err != nil {
-h.observer.Errorf("Failed to retrieve server information: %s", err)
+h.observer.log.Error().Msgf("Failed to retrieve server information: %s", err)
 return err
 }
-h.observer.logServerInfo(h.connIndex, serverInfo.LocationName, fmt.Sprintf("Connnection %d connected to %s", h.connIndex, serverInfo.LocationName))
+h.observer.logServerInfo(h.connIndex, serverInfo.LocationName, fmt.Sprintf("Connection %d connected to %s", h.connIndex, serverInfo.LocationName))
 return nil
 }

@@ -281,15 +282,15 @@ func (h *h2muxConnection) unregister(isNamedTunnel bool) {
 }

 if isNamedTunnel {
-rpcClient := newRegistrationRPCClient(unregisterCtx, stream, h.observer)
+rpcClient := newRegistrationRPCClient(unregisterCtx, stream, h.observer.log)
 defer rpcClient.Close()

 rpcClient.GracefulShutdown(unregisterCtx, h.config.GracePeriod)
 } else {
-rpcClient := NewTunnelServerClient(unregisterCtx, stream, h.observer)
+rpcClient := NewTunnelServerClient(unregisterCtx, stream, h.observer.log)
 defer rpcClient.Close()

 // gracePeriod is encoded in int64 using capnproto
-rpcClient.client.UnregisterTunnel(unregisterCtx, h.config.GracePeriod.Nanoseconds())
+_ = rpcClient.client.UnregisterTunnel(unregisterCtx, h.config.GracePeriod.Nanoseconds())
 }
 }
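Throughout this migration the existing printf-style messages are kept via Msgf. zerolog also supports typed, machine-parseable fields, which a later refactor could adopt; a hedged, self-contained sketch of what such a call might look like (the field names and values here are illustrative, not part of this commit):

package main

import (
	"errors"
	"os"

	"github.com/rs/zerolog"
)

func main() {
	log := zerolog.New(os.Stderr).With().Timestamp().Logger()
	connIndex := uint8(2)             // illustrative value
	err := errors.New("dial timeout") // illustrative error

	// Same information as the Msgf calls above, but carried as structured
	// fields rather than formatted into the message string.
	log.Error().
		Uint8("connIndex", connIndex).
		Err(err).
		Msg("Failed to retrieve server information")
}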
@@ -8,20 +8,21 @@ import (
 "net"
 "net/http"
 "net/url"
+"os"
 "time"

 "github.com/cloudflare/cloudflared/hello"
-"github.com/cloudflare/cloudflared/logger"
 "github.com/cloudflare/cloudflared/validation"
 "github.com/gorilla/mux"
 "github.com/pkg/errors"
+"github.com/rs/zerolog"
 )

 // Proxy is an HTTP server that proxies requests to a Client.
 type Proxy struct {
 client Client
 accessValidator *validation.Access
-logger logger.Service
+log *zerolog.Logger
 }

 // NewInsecureProxy creates a Proxy that talks to a Client at an origin.
@@ -43,12 +44,9 @@ func NewInsecureProxy(ctx context.Context, origin string) (*Proxy, error) {
 return nil, errors.Wrap(err, "could not connect to the database")
 }

-logger, err := logger.New() // TODO: Does not obey log configuration
-if err != nil {
-return nil, errors.Wrap(err, "error setting up logger")
-}
+log := zerolog.New(os.Stderr).With().Logger() // TODO: Does not obey log configuration

-return &Proxy{client, nil, logger}, nil
+return &Proxy{client, nil, &log}, nil
 }

 // NewSecureProxy creates a Proxy that talks to a Client at an origin.
@@ -96,7 +94,7 @@ func (proxy *Proxy) IsAllowed(r *http.Request, verbose ...bool) bool {
 // of either a misconfiguration of the CLI or a massive failure of upstream systems.
 if len(verbose) > 0 {
 cfRay := proxy.getRayHeader(r)
-proxy.logger.Infof("dbproxy: Failed JWT authentication: cf-ray: %s %s", cfRay, err)
+proxy.log.Info().Msgf("dbproxy: Failed JWT authentication: cf-ray: %s %s", cfRay, err)
 }

 return false
@@ -151,8 +149,8 @@ func (proxy *Proxy) httpListen(ctx context.Context, listener net.Listener) error

 go func() {
 <-ctx.Done()
-httpServer.Close()
-listener.Close()
+_ = httpServer.Close()
+_ = listener.Close()
 }()

 return httpServer.Serve(listener)
@@ -241,7 +239,7 @@ func (proxy *Proxy) httpRespondErr(w http.ResponseWriter, r *http.Request, defau
 proxy.httpRespond(w, r, status, err.Error())
 if len(err.Error()) > 0 {
 cfRay := proxy.getRayHeader(r)
-proxy.logger.Infof("dbproxy: Database proxy error: cf-ray: %s %s", cfRay, err)
+proxy.log.Info().Msgf("dbproxy: Database proxy error: cf-ray: %s %s", cfRay, err)
 }
 }

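For reference, the fallback pattern used in NewInsecureProxy above, building a logger that writes straight to stderr when no configured logger is available, looks roughly like the sketch below. The Timestamp field and the logged address are optional embellishments for illustration and are not part of the diff.

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// A bare stderr logger; With().Timestamp() stamps every event.
	log := zerolog.New(os.Stderr).With().Timestamp().Logger()

	log.Info().Msgf("dbproxy: listening on %s", "127.0.0.1:8080") // illustrative address
}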
@@ -46,7 +46,7 @@ func NewSQLClient(ctx context.Context, originURL *url.URL) (Client, error) {
 // Closes the driver, will occur when the context finishes.
 go func() {
 <-ctx.Done()
-driver.Close()
+_ = driver.Close()
 }()

 return &SQLClient{driver.DriverName(), driver}, nil
@@ -260,7 +260,7 @@ func sqlRows(rows *sql.Rows) ([]map[string]interface{}, error) {
 for i := range columns {
 pointers[i] = &values[i]
 }
-rows.Scan(pointers...)
+_ = rows.Scan(pointers...)

 // Convert a row, an array of values, into an object where
 // each key is the name of its respective column.
@@ -7,8 +7,8 @@ import (
 "net"
 "time"

-"github.com/cloudflare/cloudflared/logger"
 "github.com/pkg/errors"
+"github.com/rs/zerolog"
 )

 const (
@@ -58,15 +58,15 @@ var friendlyDNSErrorLines = []string{
 }

 // EdgeDiscovery implements HA service discovery lookup.
-func edgeDiscovery(logger logger.Service) ([][]*net.TCPAddr, error) {
+func edgeDiscovery(log *zerolog.Logger) ([][]*net.TCPAddr, error) {
 _, addrs, err := netLookupSRV(srvService, srvProto, srvName)
 if err != nil {
 _, fallbackAddrs, fallbackErr := fallbackLookupSRV(srvService, srvProto, srvName)
 if fallbackErr != nil || len(fallbackAddrs) == 0 {
 // use the original DNS error `err` in messages, not `fallbackErr`
-logger.Errorf("Error looking up Cloudflare edge IPs: the DNS query failed: %s", err)
+log.Error().Msgf("Error looking up Cloudflare edge IPs: the DNS query failed: %s", err)
 for _, s := range friendlyDNSErrorLines {
-logger.Error(s)
+log.Error().Msg(s)
 }
 return nil, errors.Wrapf(err, "Could not lookup srv records on _%v._%v.%v", srvService, srvProto, srvName)
 }
@@ -122,11 +122,11 @@ func resolveSRVToTCP(srv *net.SRV) ([]*net.TCPAddr, error) {

 // ResolveAddrs resolves TCP address given a list of addresses. Address can be a hostname, however, it will return at most one
 // of the hostname's IP addresses.
-func ResolveAddrs(addrs []string, logger logger.Service) (resolved []*net.TCPAddr) {
+func ResolveAddrs(addrs []string, log *zerolog.Logger) (resolved []*net.TCPAddr) {
 for _, addr := range addrs {
 tcpAddr, err := net.ResolveTCPAddr("tcp", addr)
 if err != nil {
-logger.Errorf("Failed to resolve %s, err: %v", addr, err)
+log.Error().Msgf("Failed to resolve %s, err: %v", addr, err)
 } else {
 resolved = append(resolved, tcpAddr)
 }
@@ -3,7 +3,7 @@ package allregions
 import (
 "testing"

-"github.com/cloudflare/cloudflared/logger"
+"github.com/rs/zerolog"
 "github.com/stretchr/testify/assert"
 )

@@ -19,8 +19,8 @@ func TestEdgeDiscovery(t *testing.T) {
 }
 }

-l := logger.NewOutputWriter(logger.NewMockWriteManager())
-addrLists, err := edgeDiscovery(l)
+l := zerolog.Nop()
+addrLists, err := edgeDiscovery(&l)
 assert.NoError(t, err)
 actualAddrSet := map[string]bool{}
 for _, addrs := range addrLists {
@@ -57,7 +57,6 @@ func (r Region) GetUnusedIP(excluding *net.TCPAddr) *net.TCPAddr {
 // Use the address, assigning it to a proxy connection.
 func (r Region) Use(addr *net.TCPAddr, connID int) {
 if addr == nil {
-//logrus.Errorf("Attempted to use nil address for connection %d", connID)
 return
 }
 r.connFor[addr] = InUse(connID)
@@ -4,7 +4,7 @@ import (
 "fmt"
 "net"

-"github.com/cloudflare/cloudflared/logger"
+"github.com/rs/zerolog"
 )

 // Regions stores Cloudflare edge network IPs, partitioned into two regions.
@@ -19,8 +19,8 @@ type Regions struct {
 // ------------------------------------

 // ResolveEdge resolves the Cloudflare edge, returning all regions discovered.
-func ResolveEdge(logger logger.Service) (*Regions, error) {
-addrLists, err := edgeDiscovery(logger)
+func ResolveEdge(log *zerolog.Logger) (*Regions, error) {
+addrLists, err := edgeDiscovery(log)
 if err != nil {
 return nil, err
 }
@@ -35,8 +35,8 @@ func ResolveEdge(logger logger.Service) (*Regions, error) {

 // StaticEdge creates a list of edge addresses from the list of hostnames.
 // Mainly used for testing connectivity.
-func StaticEdge(hostnames []string, logger logger.Service) (*Regions, error) {
-resolved := ResolveAddrs(hostnames, logger)
+func StaticEdge(hostnames []string, log *zerolog.Logger) (*Regions, error) {
+resolved := ResolveAddrs(hostnames, log)
 if len(resolved) == 0 {
 return nil, fmt.Errorf("failed to resolve any edge address")
 }
@@ -6,7 +6,7 @@ import (
 "sync"

 "github.com/cloudflare/cloudflared/edgediscovery/allregions"
-"github.com/cloudflare/cloudflared/logger"
+"github.com/rs/zerolog"
 )

 const (
@@ -19,7 +19,7 @@ var errNoAddressesLeft = fmt.Errorf("There are no free edge addresses left")
 type Edge struct {
 regions *allregions.Regions
 sync.Mutex
-logger logger.Service
+log *zerolog.Logger
 }

 // ------------------------------------
@@ -28,34 +28,34 @@ type Edge struct {

 // ResolveEdge runs the initial discovery of the Cloudflare edge, finding Addrs that can be allocated
 // to connections.
-func ResolveEdge(l logger.Service) (*Edge, error) {
-regions, err := allregions.ResolveEdge(l)
+func ResolveEdge(log *zerolog.Logger) (*Edge, error) {
+regions, err := allregions.ResolveEdge(log)
 if err != nil {
 return new(Edge), err
 }
 return &Edge{
-logger: l,
+log: log,
 regions: regions,
 }, nil
 }

 // StaticEdge creates a list of edge addresses from the list of hostnames. Mainly used for testing connectivity.
-func StaticEdge(l logger.Service, hostnames []string) (*Edge, error) {
-regions, err := allregions.StaticEdge(hostnames, l)
+func StaticEdge(log *zerolog.Logger, hostnames []string) (*Edge, error) {
+regions, err := allregions.StaticEdge(hostnames, log)
 if err != nil {
 return new(Edge), err
 }
 return &Edge{
-logger: l,
+log: log,
 regions: regions,
 }, nil
 }

 // MockEdge creates a Cloudflare Edge from arbitrary TCP addresses. Used for testing.
-func MockEdge(l logger.Service, addrs []*net.TCPAddr) *Edge {
+func MockEdge(log *zerolog.Logger, addrs []*net.TCPAddr) *Edge {
 regions := allregions.NewNoResolve(addrs)
 return &Edge{
-logger: l,
+log: log,
 regions: regions,
 }
 }
@@ -82,17 +82,17 @@ func (ed *Edge) GetAddr(connID int) (*net.TCPAddr, error) {

 // If this connection has already used an edge addr, return it.
 if addr := ed.regions.AddrUsedBy(connID); addr != nil {
-ed.logger.Debugf("edgediscovery - GetAddr: Returning same address back to proxy connection: connID: %d", connID)
+ed.log.Debug().Msgf("edgediscovery - GetAddr: Returning same address back to proxy connection: connID: %d", connID)
 return addr, nil
 }

 // Otherwise, give it an unused one
 addr := ed.regions.GetUnusedAddr(nil, connID)
 if addr == nil {
-ed.logger.Debugf("edgediscovery - GetAddr: No addresses left to give proxy connection: connID: %d", connID)
+ed.log.Debug().Msgf("edgediscovery - GetAddr: No addresses left to give proxy connection: connID: %d", connID)
 return nil, errNoAddressesLeft
 }
-ed.logger.Debugf("edgediscovery - GetAddr: Giving connection its new address %s: connID: %d", addr, connID)
+ed.log.Debug().Msgf("edgediscovery - GetAddr: Giving connection its new address %s: connID: %d", addr, connID)
 return addr, nil
 }

@@ -107,11 +107,11 @@ func (ed *Edge) GetDifferentAddr(connID int) (*net.TCPAddr, error) {
 }
 addr := ed.regions.GetUnusedAddr(oldAddr, connID)
 if addr == nil {
-ed.logger.Debugf("edgediscovery - GetDifferentAddr: No addresses left to give proxy connection: connID: %d", connID)
+ed.log.Debug().Msgf("edgediscovery - GetDifferentAddr: No addresses left to give proxy connection: connID: %d", connID)
 // note: if oldAddr were not nil, it will become available on the next iteration
 return nil, errNoAddressesLeft
 }
-ed.logger.Debugf("edgediscovery - GetDifferentAddr: Giving connection its new address %s: connID: %d", addr, connID)
+ed.log.Debug().Msgf("edgediscovery - GetDifferentAddr: Giving connection its new address %s: connID: %d", addr, connID)
 return addr, nil
 }

@@ -127,6 +127,6 @@ func (ed *Edge) AvailableAddrs() int {
 func (ed *Edge) GiveBack(addr *net.TCPAddr) bool {
 ed.Lock()
 defer ed.Unlock()
-ed.logger.Debug("edgediscovery - GiveBack: Address now unused")
+ed.log.Debug().Msg("edgediscovery - GiveBack: Address now unused")
 return ed.regions.GiveBack(addr)
 }

@@ -4,7 +4,7 @@ import (
 "net"
 "testing"

-"github.com/cloudflare/cloudflared/logger"
+"github.com/rs/zerolog"
 "github.com/stretchr/testify/assert"
 )

@@ -29,11 +29,12 @@ var (
 Port: 8000,
 Zone: "",
 }

+log = zerolog.Nop()
 )

 func TestGiveBack(t *testing.T) {
-l := logger.NewOutputWriter(logger.NewMockWriteManager())
-edge := MockEdge(l, []*net.TCPAddr{&addr0, &addr1, &addr2, &addr3})
+edge := MockEdge(&log, []*net.TCPAddr{&addr0, &addr1, &addr2, &addr3})

 // Give this connection an address
 assert.Equal(t, 4, edge.AvailableAddrs())
@@ -49,10 +50,8 @@ func TestGiveBack(t *testing.T) {
 }

 func TestRPCAndProxyShareSingleEdgeIP(t *testing.T) {
-l := logger.NewOutputWriter(logger.NewMockWriteManager())
-
 // Make an edge with a single IP
-edge := MockEdge(l, []*net.TCPAddr{&addr0})
+edge := MockEdge(&log, []*net.TCPAddr{&addr0})
 tunnelConnID := 0

 // Use the IP for a tunnel
@@ -66,8 +65,7 @@ func TestRPCAndProxyShareSingleEdgeIP(t *testing.T) {
 }

 func TestGetAddrForRPC(t *testing.T) {
-l := logger.NewOutputWriter(logger.NewMockWriteManager())
-edge := MockEdge(l, []*net.TCPAddr{&addr0, &addr1, &addr2, &addr3})
+edge := MockEdge(&log, []*net.TCPAddr{&addr0, &addr1, &addr2, &addr3})

 // Get a connection
 assert.Equal(t, 4, edge.AvailableAddrs())
@@ -84,10 +82,8 @@ func TestGetAddrForRPC(t *testing.T) {
 }

 func TestOnePerRegion(t *testing.T) {
-l := logger.NewOutputWriter(logger.NewMockWriteManager())
-
 // Make an edge with only one address
-edge := MockEdge(l, []*net.TCPAddr{&addr0, &addr1})
+edge := MockEdge(&log, []*net.TCPAddr{&addr0, &addr1})

 // Use the only address
 const connID = 0
@@ -108,10 +104,8 @@ func TestOnePerRegion(t *testing.T) {
 }

 func TestOnlyOneAddrLeft(t *testing.T) {
-l := logger.NewOutputWriter(logger.NewMockWriteManager())
-
 // Make an edge with only one address
-edge := MockEdge(l, []*net.TCPAddr{&addr0})
+edge := MockEdge(&log, []*net.TCPAddr{&addr0})

 // Use the only address
 const connID = 0
@@ -130,10 +124,8 @@ func TestOnlyOneAddrLeft(t *testing.T) {
 }

 func TestNoAddrsLeft(t *testing.T) {
-l := logger.NewOutputWriter(logger.NewMockWriteManager())
-
 // Make an edge with no addresses
-edge := MockEdge(l, []*net.TCPAddr{})
+edge := MockEdge(&log, []*net.TCPAddr{})

 _, err := edge.GetAddr(2)
 assert.Error(t, err)
@@ -142,8 +134,7 @@ func TestNoAddrsLeft(t *testing.T) {
 }

 func TestGetAddr(t *testing.T) {
-l := logger.NewOutputWriter(logger.NewMockWriteManager())
-edge := MockEdge(l, []*net.TCPAddr{&addr0, &addr1, &addr2, &addr3})
+edge := MockEdge(&log, []*net.TCPAddr{&addr0, &addr1, &addr2, &addr3})

 // Give this connection an address
 const connID = 0
@@ -158,8 +149,7 @@ func TestGetAddr(t *testing.T) {
 }

 func TestGetDifferentAddr(t *testing.T) {
-l := logger.NewOutputWriter(logger.NewMockWriteManager())
-edge := MockEdge(l, []*net.TCPAddr{&addr0, &addr1, &addr2, &addr3})
+edge := MockEdge(&log, []*net.TCPAddr{&addr0, &addr1, &addr2, &addr3})

 // Give this connection an address
 assert.Equal(t, 4, edge.AvailableAddrs())
go.mod
@@ -50,6 +50,7 @@ require (
 github.com/prometheus/client_golang v1.7.1
 github.com/prometheus/common v0.13.0 // indirect
 github.com/rivo/tview v0.0.0-20200712113419-c65badfc3d92
+github.com/rs/zerolog v1.20.0
 github.com/stretchr/testify v1.6.0
 github.com/urfave/cli/v2 v2.2.0
 github.com/xo/dburl v0.0.0-20191005012637-293c3298d6c0
go.sum (5 additions)
@@ -143,6 +143,7 @@ github.com/coreos/go-oidc v0.0.0-20171002155002-a93f71fdfe73 h1:7CNPV0LWRCa1FNmq
 github.com/coreos/go-oidc v0.0.0-20171002155002-a93f71fdfe73/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc=
 github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk=
 github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
+github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
 github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
 github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk=
@@ -565,6 +566,9 @@ github.com/rivo/uniseg v0.1.0 h1:+2KBaVoUmb9XzDsrx/Ct0W/EYOSFf/nWTauy++DprtY=
 github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
+github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
+github.com/rs/zerolog v1.20.0 h1:38k9hgtUBdxFwE34yS8rTHmHBa4eN16E4DJlv177LNs=
+github.com/rs/zerolog v1.20.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo=
 github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4 h1:S9YlS71UNJIyS61OqGAmLXv3w5zclSidN+qwr80XxKs=
 github.com/russross/blackfriday v0.0.0-20170610170232-067529f716f4/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/russross/blackfriday/v2 v2.0.1 h1:lPqVAte+HuHNfhJ/0LC98ESWRz8afy9tM/0RK8m9o+Q=
@@ -837,6 +841,7 @@ golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgw
 golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
 golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20190828213141-aed303cbaa74/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
 golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
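The dependency bump above is all that is needed to construct zerolog loggers anywhere in the module. A standalone sketch of the typical construction; the ConsoleWriter setup is illustrative and not necessarily how cloudflared builds its logger:

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// Human-readable console output; zerolog emits structured JSON by default.
	log := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr}).With().Timestamp().Logger()
	log.Info().Str("version", "v1.20.0").Msg("zerolog wired into the build")
}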
@@ -8,11 +8,10 @@ import (
 	"time"
 
 	"github.com/prometheus/client_golang/prometheus"
+	"github.com/rs/zerolog"
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
 	"golang.org/x/sync/errgroup"
-
-	"github.com/cloudflare/cloudflared/logger"
 )
 
 const (
@@ -50,7 +49,7 @@ type MuxerConfig struct {
 	// The minimum number of heartbeats to send before terminating the connection.
 	MaxHeartbeats uint64
 	// Logger to use
-	Logger logger.Service
+	Log *zerolog.Logger
 	CompressionQuality CompressionSetting
 	// Initial size for HTTP2 flow control windows
 	DefaultWindowSize uint32
@@ -138,10 +137,10 @@ func Handshake(
 	handshakeSetting := http2.Setting{ID: SettingMuxerMagic, Val: MuxerMagicEdge}
 	compressionSetting := http2.Setting{ID: SettingCompression, Val: config.CompressionQuality.toH2Setting()}
 	if CompressionIsSupported() {
-		config.Logger.Debug("muxer: Compression is supported")
+		config.Log.Debug().Msg("muxer: Compression is supported")
 		m.compressionQuality = config.CompressionQuality.getPreset()
 	} else {
-		config.Logger.Debug("muxer: Compression is not supported")
+		config.Log.Debug().Msg("muxer: Compression is not supported")
 		compressionSetting = http2.Setting{ID: SettingCompression, Val: 0}
 	}
 
@@ -178,12 +177,12 @@ func Handshake(
 	// Sanity check to ensure idleDuration is sane
 	if idleDuration == 0 || idleDuration < defaultTimeout {
 		idleDuration = defaultTimeout
-		config.Logger.Infof("muxer: Minimum idle time has been adjusted to %d", defaultTimeout)
+		config.Log.Info().Msgf("muxer: Minimum idle time has been adjusted to %d", defaultTimeout)
 	}
 	maxRetries := config.MaxHeartbeats
 	if maxRetries == 0 {
 		maxRetries = defaultRetries
-		config.Logger.Infof("muxer: Minimum number of unacked heartbeats to send before closing the connection has been adjusted to %d", maxRetries)
+		config.Log.Info().Msgf("muxer: Minimum number of unacked heartbeats to send before closing the connection has been adjusted to %d", maxRetries)
 	}
 
 	compBytesBefore, compBytesAfter := NewAtomicCounter(0), NewAtomicCounter(0)
@@ -325,7 +324,7 @@ func (m *Muxer) Serve(ctx context.Context) error {
 	errGroup.Go(func() error {
 		ch := make(chan error)
 		go func() {
-			err := m.muxReader.run(m.config.Logger)
+			err := m.muxReader.run(m.config.Log)
 			m.explicitShutdown.Fuse(false)
 			m.r.Close()
 			m.abort()
@@ -346,7 +345,7 @@ func (m *Muxer) Serve(ctx context.Context) error {
 	errGroup.Go(func() error {
 		ch := make(chan error)
 		go func() {
-			err := m.muxWriter.run(m.config.Logger)
+			err := m.muxWriter.run(m.config.Log)
 			m.explicitShutdown.Fuse(false)
 			m.w.Close()
 			m.abort()
@@ -367,7 +366,7 @@ func (m *Muxer) Serve(ctx context.Context) error {
 	errGroup.Go(func() error {
 		ch := make(chan error)
 		go func() {
-			err := m.muxMetricsUpdater.run(m.config.Logger)
+			err := m.muxMetricsUpdater.run(m.config.Log)
 			// don't block if parent goroutine quit early
 			select {
 			case ch <- err:
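To make the new MuxerConfig field concrete, here is a hedged sketch of how a caller inside the h2mux package might populate it; only the Name, IsClient and Log fields are taken from the diff, while the helper itself, the logger construction and the required os/zerolog imports are illustrative assumptions:

// newOriginMuxerConfig is a hypothetical helper, not part of the package;
// it only sets the fields relevant to the logging change.
func newOriginMuxerConfig() MuxerConfig {
	log := zerolog.New(os.Stderr).With().Timestamp().Logger()
	return MuxerConfig{
		Name:     "origin",
		IsClient: true,
		Log:      &log, // replaces the old `Logger logger.Service` field
	}
}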
@@ -16,10 +16,9 @@ import (
 	"time"
 
 	"github.com/pkg/errors"
+	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/assert"
 	"golang.org/x/sync/errgroup"
-
-	"github.com/cloudflare/cloudflared/logger"
 )
 
 const (
@@ -27,6 +26,8 @@ const (
 	testHandshakeTimeout = time.Millisecond * 1000
 )
 
+var log = zerolog.Nop()
+
 func TestMain(m *testing.M) {
 	if os.Getenv("VERBOSE") == "1" {
 		//TODO: set log level
@@ -52,7 +53,7 @@ func NewDefaultMuxerPair(t assert.TestingT, testName string, f MuxedStreamFunc)
 		Handler: f,
 		IsClient: true,
 		Name: "origin",
-		Logger: logger.NewOutputWriter(logger.NewMockWriteManager()),
+		Log: &log,
 		DefaultWindowSize: (1 << 8) - 1,
 		MaxWindowSize: (1 << 15) - 1,
 		StreamWriteBufferMaxLen: 1024,
@@ -64,7 +65,7 @@ func NewDefaultMuxerPair(t assert.TestingT, testName string, f MuxedStreamFunc)
 		Timeout: testHandshakeTimeout,
 		IsClient: false,
 		Name: "edge",
-		Logger: logger.NewOutputWriter(logger.NewMockWriteManager()),
+		Log: &log,
 		DefaultWindowSize: (1 << 8) - 1,
 		MaxWindowSize: (1 << 15) - 1,
 		StreamWriteBufferMaxLen: 1024,
@@ -87,7 +88,7 @@ func NewCompressedMuxerPair(t assert.TestingT, testName string, quality Compress
 		IsClient: true,
 		Name: "origin",
 		CompressionQuality: quality,
-		Logger: logger.NewOutputWriter(logger.NewMockWriteManager()),
+		Log: &log,
 		HeartbeatInterval: defaultTimeout,
 		MaxHeartbeats: defaultRetries,
 	},
@@ -97,7 +98,7 @@ func NewCompressedMuxerPair(t assert.TestingT, testName string, quality Compress
 		IsClient: false,
 		Name: "edge",
 		CompressionQuality: quality,
-		Logger: logger.NewOutputWriter(logger.NewMockWriteManager()),
+		Log: &log,
 		HeartbeatInterval: defaultTimeout,
 		MaxHeartbeats: defaultRetries,
 	},
@@ -186,11 +187,11 @@ func TestSingleStream(t *testing.T) {
 		if stream.Headers[0].Value != "headerValue" {
 			t.Fatalf("expected header value %s, got %s", "headerValue", stream.Headers[0].Value)
 		}
-		stream.WriteHeaders([]Header{
+		_ = stream.WriteHeaders([]Header{
 			{Name: "response-header", Value: "responseValue"},
 		})
 		buf := []byte("Hello world")
-		stream.Write(buf)
+		_, _ = stream.Write(buf)
 		n, err := io.ReadFull(stream, buf)
 		if n > 0 {
 			t.Fatalf("read %d bytes after EOF", n)
@@ -230,7 +231,7 @@ func TestSingleStream(t *testing.T) {
 	if string(responseBody) != "Hello world" {
 		t.Fatalf("expected response body %s, got %s", "Hello world", responseBody)
 	}
-	stream.Close()
+	_ = stream.Close()
 	n, err = stream.Write([]byte("aaaaa"))
 	if n > 0 {
 		t.Fatalf("wrote %d bytes after EOF", n)
@@ -252,7 +253,7 @@ func TestSingleStreamLargeResponseBody(t *testing.T) {
 		if stream.Headers[0].Value != "headerValue" {
 			t.Fatalf("expected header value %s, got %s", "headerValue", stream.Headers[0].Value)
 		}
-		stream.WriteHeaders([]Header{
+		_ = stream.WriteHeaders([]Header{
 			{Name: "response-header", Value: "responseValue"},
 		})
 		payload := make([]byte, bodySize)
@@ -302,7 +303,6 @@ func TestSingleStreamLargeResponseBody(t *testing.T) {
 }
 
 func TestMultipleStreams(t *testing.T) {
-	l := logger.NewOutputWriter(logger.NewMockWriteManager())
 	f := MuxedStreamFunc(func(stream *MuxedStream) error {
 		if len(stream.Headers) != 1 {
 			t.Fatalf("expected %d headers, got %d", 1, len(stream.Headers))
@@ -310,13 +310,13 @@ func TestMultipleStreams(t *testing.T) {
 		if stream.Headers[0].Name != "client-token" {
 			t.Fatalf("expected header name %s, got %s", "client-token", stream.Headers[0].Name)
 		}
-		l.Debugf("Got request for stream %s", stream.Headers[0].Value)
-		stream.WriteHeaders([]Header{
+		log.Debug().Msgf("Got request for stream %s", stream.Headers[0].Value)
+		_ = stream.WriteHeaders([]Header{
 			{Name: "response-token", Value: stream.Headers[0].Value},
 		})
-		l.Debugf("Wrote headers for stream %s", stream.Headers[0].Value)
-		stream.Write([]byte("OK"))
-		l.Debugf("Wrote body for stream %s", stream.Headers[0].Value)
+		log.Debug().Msgf("Wrote headers for stream %s", stream.Headers[0].Value)
+		_, _ = stream.Write([]byte("OK"))
+		log.Debug().Msgf("Wrote body for stream %s", stream.Headers[0].Value)
 		return nil
 	})
 	muxPair := NewDefaultMuxerPair(t, t.Name(), f)
@@ -334,7 +334,7 @@ func TestMultipleStreams(t *testing.T) {
 			[]Header{{Name: "client-token", Value: tokenString}},
 			nil,
 		)
-		l.Debugf("Got headers for stream %d", tokenId)
+		log.Debug().Msgf("Got headers for stream %d", tokenId)
 		if err != nil {
 			errorsC <- err
 			return
@@ -372,7 +372,7 @@ func TestMultipleStreams(t *testing.T) {
 	testFail := false
 	for err := range errorsC {
 		testFail = true
-		l.Errorf("%s", err)
+		log.Error().Msgf("%s", err)
 	}
 	if testFail {
 		t.Fatalf("TestMultipleStreams failed")
@@ -396,7 +396,7 @@ func TestMultipleStreamsFlowControl(t *testing.T) {
 		if stream.Headers[0].Value != "headerValue" {
 			t.Fatalf("expected header value %s, got %s", "headerValue", stream.Headers[0].Value)
 		}
-		stream.WriteHeaders([]Header{
+		_ = stream.WriteHeaders([]Header{
 			{Name: "response-header", Value: "responseValue"},
 		})
 		payload := make([]byte, responseSizes[(stream.streamID-2)/2])
@@ -450,27 +450,25 @@ func TestMultipleStreamsFlowControl(t *testing.T) {
 }
 
 func TestGracefulShutdown(t *testing.T) {
-	l := logger.NewOutputWriter(logger.NewMockWriteManager())
-
 	sendC := make(chan struct{})
 	responseBuf := bytes.Repeat([]byte("Hello world"), 65536)
 
 	f := MuxedStreamFunc(func(stream *MuxedStream) error {
-		stream.WriteHeaders([]Header{
+		_ = stream.WriteHeaders([]Header{
 			{Name: "response-header", Value: "responseValue"},
 		})
 		<-sendC
-		l.Debugf("Writing %d bytes", len(responseBuf))
-		stream.Write(responseBuf)
-		stream.CloseWrite()
-		l.Debugf("Wrote %d bytes", len(responseBuf))
+		log.Debug().Msgf("Writing %d bytes", len(responseBuf))
+		_, _ = stream.Write(responseBuf)
+		_ = stream.CloseWrite()
+		log.Debug().Msgf("Wrote %d bytes", len(responseBuf))
 		// Reading from the stream will block until the edge closes its end of the stream.
 		// Otherwise, we'll close the whole connection before receiving the 'stream closed'
 		// message from the edge.
 		// Graceful shutdown works if you omit this, it just gives spurious errors for now -
 		// TODO ignore errors when writing 'stream closed' and we're shutting down.
-		stream.Read([]byte{0})
-		l.Debugf("Handler ends")
+		_, _ = stream.Read([]byte{0})
+		log.Debug().Msgf("Handler ends")
 		return nil
 	})
 	muxPair := NewDefaultMuxerPair(t, t.Name(), f)
@@ -487,7 +485,7 @@ func TestGracefulShutdown(t *testing.T) {
 	muxPair.EdgeMux.Shutdown()
 	close(sendC)
 	responseBody := make([]byte, len(responseBuf))
-	l.Debugf("Waiting for %d bytes", len(responseBuf))
+	log.Debug().Msgf("Waiting for %d bytes", len(responseBuf))
 	n, err := io.ReadFull(stream, responseBody)
 	if err != nil {
 		t.Fatalf("error from (*MuxedStream).Read with %d bytes read: %s", n, err)
@@ -498,7 +496,7 @@ func TestGracefulShutdown(t *testing.T) {
 	if !bytes.Equal(responseBuf, responseBody) {
 		t.Fatalf("response body mismatch")
 	}
-	stream.Close()
+	_ = stream.Close()
 	muxPair.Wait(t)
 }
 
@@ -509,7 +507,7 @@ func TestUnexpectedShutdown(t *testing.T) {
 
 	f := MuxedStreamFunc(func(stream *MuxedStream) error {
 		defer close(handlerFinishC)
-		stream.WriteHeaders([]Header{
+		_ = stream.WriteHeaders([]Header{
 			{Name: "response-header", Value: "responseValue"},
 		})
 		<-sendC
@@ -536,7 +534,7 @@ func TestUnexpectedShutdown(t *testing.T) {
 		nil,
 	)
 	// Close the underlying connection before telling the origin to write.
-	muxPair.EdgeConn.Close()
+	_ = muxPair.EdgeConn.Close()
 	close(sendC)
 	if err != nil {
 		t.Fatalf("error in OpenStream: %s", err)
@@ -559,18 +557,18 @@ func TestUnexpectedShutdown(t *testing.T) {
 
 func EchoHandler(stream *MuxedStream) error {
 	var buf bytes.Buffer
-	fmt.Fprintf(&buf, "Hello, world!\n\n# REQUEST HEADERS:\n\n")
+	_, _ = fmt.Fprintf(&buf, "Hello, world!\n\n# REQUEST HEADERS:\n\n")
 	for _, header := range stream.Headers {
-		fmt.Fprintf(&buf, "[%s] = %s\n", header.Name, header.Value)
+		_, _ = fmt.Fprintf(&buf, "[%s] = %s\n", header.Name, header.Value)
 	}
-	stream.WriteHeaders([]Header{
+	_ = stream.WriteHeaders([]Header{
 		{Name: ":status", Value: "200"},
 		{Name: "server", Value: "Echo-server/1.0"},
 		{Name: "date", Value: time.Now().Format(time.RFC850)},
 		{Name: "content-type", Value: "text/html; charset=utf-8"},
 		{Name: "content-length", Value: strconv.Itoa(buf.Len())},
 	})
-	buf.WriteTo(stream)
+	_, _ = buf.WriteTo(stream)
 	return nil
 }
 
@@ -582,14 +580,14 @@ func TestOpenAfterDisconnect(t *testing.T) {
 		switch i {
 		case 0:
 			// Close both directions of the connection to cause EOF on both peers.
-			muxPair.OriginConn.Close()
-			muxPair.EdgeConn.Close()
+			_ = muxPair.OriginConn.Close()
+			_ = muxPair.EdgeConn.Close()
 		case 1:
 			// Close origin conn to cause EOF on origin first.
-			muxPair.OriginConn.Close()
+			_ = muxPair.OriginConn.Close()
 		case 2:
 			// Close edge conn to cause EOF on edge first.
-			muxPair.EdgeConn.Close()
+			_ = muxPair.EdgeConn.Close()
 		}
 
 		_, err := muxPair.OpenEdgeMuxStream(
@@ -617,7 +615,7 @@ func TestHPACK(t *testing.T) {
 	if err != nil {
 		t.Fatalf("error in OpenStream: %s", err)
 	}
-	stream.Close()
+	_ = stream.Close()
 
 	for i := 0; i < 3; i++ {
 		stream, err := muxPair.OpenEdgeMuxStream(
@@ -654,8 +652,8 @@ func TestHPACK(t *testing.T) {
 		if stream.Headers[0].Value != "200" {
 			t.Fatalf("expected status 200, got %s", stream.Headers[0].Value)
 		}
-		ioutil.ReadAll(stream)
-		stream.Close()
+		_, _ = ioutil.ReadAll(stream)
+		_ = stream.Close()
 	}
 }
 
@@ -680,7 +678,7 @@ func AssertIfPipeReadable(t *testing.T, pipe io.ReadCloser) {
 }
 
 func TestMultipleStreamsWithDictionaries(t *testing.T) {
-	l := logger.NewOutputWriter(logger.NewMockWriteManager())
+	l := zerolog.Nop()
 
 	for q := CompressionNone; q <= CompressionMax; q++ {
 		htmlBody := `<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.1//EN"` +
@@ -730,10 +728,10 @@ func TestMultipleStreamsWithDictionaries(t *testing.T) {
 			contentType = "img/gif"
 		}
 
-		stream.WriteHeaders([]Header{
-			Header{Name: "content-type", Value: contentType},
+		_ = stream.WriteHeaders([]Header{
+			{Name: "content-type", Value: contentType},
 		})
-		stream.Write([]byte(strings.Replace(htmlBody, "paragraph", pathHeader.Value, 1) + stream.Headers[5].Value))
+		_, _ = stream.Write([]byte(strings.Replace(htmlBody, "paragraph", pathHeader.Value, 1) + stream.Headers[5].Value))
 
 		return nil
 	})
@@ -817,7 +815,7 @@ func TestMultipleStreamsWithDictionaries(t *testing.T) {
 	testFail := false
 	for err := range errorsC {
 		testFail = true
-		l.Errorf("%s", err)
+		l.Error().Msgf("%s", err)
 	}
 	if testFail {
 		t.Fatalf("TestMultipleStreams failed")
@@ -831,8 +829,6 @@ func TestMultipleStreamsWithDictionaries(t *testing.T) {
 }
 
 func sampleSiteHandler(files map[string][]byte) MuxedStreamFunc {
-	l := logger.NewOutputWriter(logger.NewMockWriteManager())
-
 	return func(stream *MuxedStream) error {
 		var contentType string
 		var pathHeader Header
@@ -857,16 +853,16 @@ func sampleSiteHandler(files map[string][]byte) MuxedStreamFunc {
 		} else {
 			contentType = "img/gif"
 		}
-		stream.WriteHeaders([]Header{
-			Header{Name: "content-type", Value: contentType},
+		_ = stream.WriteHeaders([]Header{
+			{Name: "content-type", Value: contentType},
 		})
-		l.Debugf("Wrote headers for stream %s", pathHeader.Value)
+		log.Debug().Msgf("Wrote headers for stream %s", pathHeader.Value)
 		file, ok := files[pathHeader.Value]
 		if !ok {
 			return fmt.Errorf("%s content is not preloaded", pathHeader.Value)
 		}
-		stream.Write(file)
-		l.Debugf("Wrote body for stream %s", pathHeader.Value)
+		_, _ = stream.Write(file)
+		log.Debug().Msgf("Wrote body for stream %s", pathHeader.Value)
 		return nil
 	}
 }
@@ -1008,7 +1004,7 @@ func BenchmarkOpenStream(b *testing.B) {
 		if stream.Headers[0].Value != "headerValue" {
 			b.Fatalf("expected header value %s, got %s", "headerValue", stream.Headers[0].Value)
 		}
-		stream.WriteHeaders([]Header{
+		_ = stream.WriteHeaders([]Header{
 			{Name: "response-header", Value: "responseValue"},
 		})
 		return nil
@@ -1058,7 +1054,7 @@ func BenchmarkSingleStreamLargeResponseBody(b *testing.B) {
 		if stream.Headers[0].Value != "headerValue" {
 			b.Fatalf("expected header value %s, got %s", "headerValue", stream.Headers[0].Value)
 		}
-		stream.WriteHeaders([]Header{
+		_ = stream.WriteHeaders([]Header{
 			{Name: "response-header", Value: "responseValue"},
 		})
 		for i := 0; i < writeN; i++ {
@@ -1083,7 +1079,7 @@ func BenchmarkSingleStreamLargeResponseBody(b *testing.B) {
 		Handler: f,
 		IsClient: true,
 		Name: "origin",
-		Logger: logger.NewOutputWriter(logger.NewMockWriteManager()),
+		Log: &log,
 		DefaultWindowSize: defaultWindowSize,
 		MaxWindowSize: maxWindowSize,
 		StreamWriteBufferMaxLen: defaultWriteBufferMaxLen,
@@ -1095,7 +1091,7 @@ func BenchmarkSingleStreamLargeResponseBody(b *testing.B) {
 		Timeout: testHandshakeTimeout,
 		IsClient: false,
 		Name: "edge",
-		Logger: logger.NewOutputWriter(logger.NewMockWriteManager()),
+		Log: &log,
 		DefaultWindowSize: defaultWindowSize,
 		MaxWindowSize: maxWindowSize,
 		StreamWriteBufferMaxLen: defaultWriteBufferMaxLen,
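A recurring pattern in the test hunks above is that return values the tests deliberately ignore are now assigned to blank identifiers, so the discard is explicit to linters. A minimal sketch of that handler style, using the MuxedStream and Header types already shown in this diff; the helper itself is hypothetical, not part of the test file:

// respondOK writes a fixed response and intentionally ignores the results,
// mirroring the `_ =` / `_, _ =` assignments introduced above.
func respondOK(stream *MuxedStream) error {
	_ = stream.WriteHeaders([]Header{
		{Name: "response-header", Value: "responseValue"},
	})
	_, _ = stream.Write([]byte("OK"))
	return stream.Close()
}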
@@ -4,8 +4,8 @@ import (
 	"sync"
 	"time"
 
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/golang-collections/collections/queue"
+	"github.com/rs/zerolog"
 )
 
 // data points used to compute average receive window and send window size
@@ -20,7 +20,7 @@ type muxMetricsUpdater interface {
 	// metrics returns the latest metrics
 	metrics() *MuxerMetrics
 	// run is a blocking call to start the event loop
-	run(logger logger.Service) error
+	run(log *zerolog.Logger) error
 	// updateRTTChan is called by muxReader to report new RTT measurements
 	updateRTT(rtt *roundTripMeasurement)
 	//updateReceiveWindowChan is called by muxReader and muxWriter when receiveWindow size is updated
@@ -137,30 +137,30 @@ func (updater *muxMetricsUpdaterImpl) metrics() *MuxerMetrics {
 	return m
 }
 
-func (updater *muxMetricsUpdaterImpl) run(logger logger.Service) error {
-	defer logger.Debug("mux - metrics: event loop finished")
+func (updater *muxMetricsUpdaterImpl) run(log *zerolog.Logger) error {
+	defer log.Debug().Msg("mux - metrics: event loop finished")
 	for {
 		select {
 		case <-updater.abortChan:
-			logger.Infof("mux - metrics: Stopping mux metrics updater")
+			log.Info().Msgf("mux - metrics: Stopping mux metrics updater")
 			return nil
 		case roundTripMeasurement := <-updater.updateRTTChan:
 			go updater.rttData.update(roundTripMeasurement)
-			logger.Debug("mux - metrics: Update rtt")
+			log.Debug().Msg("mux - metrics: Update rtt")
 		case receiveWindow := <-updater.updateReceiveWindowChan:
 			go updater.receiveWindowData.update(receiveWindow)
-			logger.Debug("mux - metrics: Update receive window")
+			log.Debug().Msg("mux - metrics: Update receive window")
 		case sendWindow := <-updater.updateSendWindowChan:
 			go updater.sendWindowData.update(sendWindow)
-			logger.Debug("mux - metrics: Update send window")
+			log.Debug().Msg("mux - metrics: Update send window")
 		case inBoundBytes := <-updater.updateInBoundBytesChan:
 			// inBoundBytes is bytes/sec because the update interval is 1 sec
 			go updater.inBoundRate.update(inBoundBytes)
-			logger.Debugf("mux - metrics: Inbound bytes %d", inBoundBytes)
+			log.Debug().Msgf("mux - metrics: Inbound bytes %d", inBoundBytes)
 		case outBoundBytes := <-updater.updateOutBoundBytesChan:
 			// outBoundBytes is bytes/sec because the update interval is 1 sec
 			go updater.outBoundRate.update(outBoundBytes)
-			logger.Debugf("mux - metrics: Outbound bytes %d", outBoundBytes)
+			log.Debug().Msgf("mux - metrics: Outbound bytes %d", outBoundBytes)
 		}
 	}
 }
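For reference, a sketch of driving the metrics updater with the new signature; the constructor arguments mirror the test file that follows, while the wrapper function and buffered error channel are illustrative:

// runMetricsUpdaterSketch starts the event loop with a no-op logger and
// immediately aborts it, returning run's nil result.
func runMetricsUpdaterSketch() error {
	abortChan := make(chan struct{})
	updater := newMuxMetricsUpdater(abortChan, NewAtomicCounter(0), NewAtomicCounter(0))
	log := zerolog.Nop()
	errC := make(chan error, 1)
	go func() { errC <- updater.run(&log) }()
	close(abortChan) // run logs "Stopping mux metrics updater" and returns nil
	return <-errC
}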
@@ -5,7 +5,7 @@ import (
 	"testing"
 	"time"
 
-	"github.com/cloudflare/cloudflared/logger"
+	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/assert"
 )
 
@@ -91,10 +91,10 @@ func TestMuxMetricsUpdater(t *testing.T) {
 	abortChan := make(chan struct{})
 	compBefore, compAfter := NewAtomicCounter(0), NewAtomicCounter(0)
 	m := newMuxMetricsUpdater(abortChan, compBefore, compAfter)
-	logger := logger.NewOutputWriter(logger.NewMockWriteManager())
+	log := zerolog.Nop()
 
 	go func() {
-		errChan <- m.run(logger)
+		errChan <- m.run(&log)
 	}()
 
 	var wg sync.WaitGroup
@@ -8,7 +8,7 @@ import (
 	"net/url"
 	"time"
 
-	"github.com/cloudflare/cloudflared/logger"
+	"github.com/rs/zerolog"
 	"golang.org/x/net/http2"
 )
 
@@ -68,8 +68,8 @@ func (r *MuxReader) Shutdown() <-chan struct{} {
 	return done
 }
 
-func (r *MuxReader) run(logger logger.Service) error {
-	defer logger.Debug("mux - read: event loop finished")
+func (r *MuxReader) run(log *zerolog.Logger) error {
+	defer log.Debug().Msg("mux - read: event loop finished")
 
 	// routine to periodically update bytesRead
 	go func() {
@@ -93,7 +93,7 @@ func (r *MuxReader) run(logger logger.Service) error {
 			}
 			switch e := err.(type) {
 			case http2.StreamError:
-				logger.Infof("%s: stream error", errorString)
+				log.Info().Msgf("%s: stream error", errorString)
 				// Ideally we wouldn't return here, since that aborts the muxer.
 				// We should communicate the error to the relevant MuxedStream
 				// data structure, so that callers of MuxedStream.Read() and
@@ -101,28 +101,28 @@ func (r *MuxReader) run(logger logger.Service) error {
 				// and keep the muxer going.
 				return r.streamError(e.StreamID, e.Code)
 			case http2.ConnectionError:
-				logger.Infof("%s: stream error", errorString)
+				log.Info().Msgf("%s: stream error", errorString)
 				return r.connectionError(err)
 			default:
 				if isConnectionClosedError(err) {
 					if r.streams.Len() == 0 {
 						// don't log the error here -- that would just be extra noise
-						logger.Debug("mux - read: shutting down")
+						log.Debug().Msg("mux - read: shutting down")
 						return nil
 					}
-					logger.Infof("%s: connection closed unexpectedly", errorString)
+					log.Info().Msgf("%s: connection closed unexpectedly", errorString)
 					return err
 				} else {
-					logger.Infof("%s: frame read error", errorString)
+					log.Info().Msgf("%s: frame read error", errorString)
 					return r.connectionError(err)
 				}
 			}
 		}
 		r.connActive.Signal()
-		logger.Debugf("mux - read: read frame: data %v", frame)
+		log.Debug().Msgf("mux - read: read frame: data %v", frame)
 		switch f := frame.(type) {
 		case *http2.DataFrame:
-			err = r.receiveFrameData(f, logger)
+			err = r.receiveFrameData(f, log)
 		case *http2.MetaHeadersFrame:
 			err = r.receiveHeaderData(f)
 		case *http2.RSTStreamFrame:
@@ -155,7 +155,7 @@ func (r *MuxReader) run(logger logger.Service) error {
 			err = ErrUnexpectedFrameType
 		}
 		if err != nil {
-			logger.Debugf("mux - read: read error: data %v", frame)
+			log.Debug().Msgf("mux - read: read error: data %v", frame)
 			return r.connectionError(err)
 		}
 	}
@@ -276,7 +276,7 @@ func (r *MuxReader) handleStream(stream *MuxedStream) {
 }
 
 // Receives a data frame from a stream. A non-nil error is a connection error.
-func (r *MuxReader) receiveFrameData(frame *http2.DataFrame, logger logger.Service) error {
+func (r *MuxReader) receiveFrameData(frame *http2.DataFrame, log *zerolog.Logger) error {
 	stream, err := r.getStreamForFrame(frame)
 	if err != nil {
 		return r.defaultStreamErrorHandler(err, frame.Header())
@@ -292,9 +292,9 @@ func (r *MuxReader) receiveFrameData(frame *http2.DataFrame, log *zerolog.Logger) error {
 	if frame.Header().Flags.Has(http2.FlagDataEndStream) {
 		if stream.receiveEOF() {
 			r.streams.Delete(stream.streamID)
-			logger.Debugf("mux - read: stream closed: streamID: %d", frame.Header().StreamID)
+			log.Debug().Msgf("mux - read: stream closed: streamID: %d", frame.Header().StreamID)
 		} else {
-			logger.Debugf("mux - read: shutdown receive side: streamID: %d", frame.Header().StreamID)
+			log.Debug().Msgf("mux - read: shutdown receive side: streamID: %d", frame.Header().StreamID)
 		}
 		return nil
 	}
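The reader loop above logs every frame at debug level; with zerolog that volume is usually controlled through the global level rather than a per-instance logger.Service setting (the tests earlier in this diff leave their level handling as a TODO). A hedged sketch of the knob, assuming os and zerolog are imported; this helper is illustrative and not something this commit adds:

// setMuxLogLevelSketch silences per-frame "mux - read: read frame" output
// unless verbose logging is requested via the environment.
func setMuxLogLevelSketch() {
	if os.Getenv("VERBOSE") == "1" {
		zerolog.SetGlobalLevel(zerolog.DebugLevel)
		return
	}
	zerolog.SetGlobalLevel(zerolog.InfoLevel)
}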
@@ -3,10 +3,10 @@ package h2mux
 import (
 	"bytes"
 	"encoding/binary"
+	"github.com/rs/zerolog"
 	"io"
 	"time"
 
-	"github.com/cloudflare/cloudflared/logger"
 	"golang.org/x/net/http2"
 	"golang.org/x/net/http2/hpack"
 )
@@ -72,8 +72,8 @@ func tsToPingData(ts int64) [8]byte {
 	return pingData
 }
 
-func (w *MuxWriter) run(logger logger.Service) error {
-	defer logger.Debug("mux - write: event loop finished")
+func (w *MuxWriter) run(log *zerolog.Logger) error {
+	defer log.Debug().Msg("mux - write: event loop finished")
 
 	// routine to periodically communicate bytesWrote
 	go func() {
@@ -91,17 +91,17 @@ func (w *MuxWriter) run(log *zerolog.Logger) error {
 	for {
 		select {
 		case <-w.abortChan:
-			logger.Debug("mux - write: aborting writer thread")
+			log.Debug().Msg("mux - write: aborting writer thread")
 			return nil
 		case errCode := <-w.goAwayChan:
-			logger.Debugf("mux - write: sending GOAWAY code %v", errCode)
+			log.Debug().Msgf("mux - write: sending GOAWAY code %v", errCode)
 			err := w.f.WriteGoAway(w.streams.LastPeerStreamID(), errCode, []byte{})
 			if err != nil {
 				return err
 			}
 			w.idleTimer.MarkActive()
 		case <-w.pingTimestamp.GetUpdateChan():
-			logger.Debug("mux - write: sending PING ACK")
+			log.Debug().Msg("mux - write: sending PING ACK")
 			err := w.f.WritePing(true, tsToPingData(w.pingTimestamp.Get()))
 			if err != nil {
 				return err
@@ -111,7 +111,7 @@ func (w *MuxWriter) run(log *zerolog.Logger) error {
 			if !w.idleTimer.Retry() {
 				return ErrConnectionDropped
 			}
-			logger.Debug("mux - write: sending PING")
+			log.Debug().Msg("mux - write: sending PING")
 			err := w.f.WritePing(false, tsToPingData(time.Now().UnixNano()))
 			if err != nil {
 				return err
@@ -121,7 +121,7 @@ func (w *MuxWriter) run(log *zerolog.Logger) error {
 			w.idleTimer.MarkActive()
 		case <-w.streamErrors.GetSignalChan():
 			for streamID, errCode := range w.streamErrors.GetErrors() {
-				logger.Debugf("mux - write: resetting stream with code: %v streamID: %d", errCode, streamID)
+				log.Debug().Msgf("mux - write: resetting stream with code: %v streamID: %d", errCode, streamID)
 				err := w.f.WriteRSTStream(streamID, errCode)
 				if err != nil {
 					return err
@@ -141,7 +141,7 @@ func (w *MuxWriter) run(log *zerolog.Logger) error {
 			if streamRequest.body != nil {
 				go streamRequest.flushBody()
 			}
-			err := w.writeStreamData(streamRequest.stream, logger)
+			err := w.writeStreamData(streamRequest.stream, log)
 			if err != nil {
 				return err
 			}
@@ -151,7 +151,7 @@ func (w *MuxWriter) run(log *zerolog.Logger) error {
 			if !ok {
 				continue
 			}
-			err := w.writeStreamData(stream, logger)
+			err := w.writeStreamData(stream, log)
 			if err != nil {
 				return err
 			}
@@ -159,7 +159,7 @@ func (w *MuxWriter) run(log *zerolog.Logger) error {
 		case useDict := <-w.useDictChan:
 			err := w.writeUseDictionary(useDict)
 			if err != nil {
-				logger.Errorf("mux - write: error writing use dictionary: %s", err)
+				log.Error().Msgf("mux - write: error writing use dictionary: %s", err)
 				return err
 			}
 			w.idleTimer.MarkActive()
@@ -167,18 +167,18 @@ func (w *MuxWriter) run(log *zerolog.Logger) error {
 	}
 }
 
-func (w *MuxWriter) writeStreamData(stream *MuxedStream, logger logger.Service) error {
-	logger.Debugf("mux - write: writable: streamID: %d", stream.streamID)
+func (w *MuxWriter) writeStreamData(stream *MuxedStream, log *zerolog.Logger) error {
+	log.Debug().Msgf("mux - write: writable: streamID: %d", stream.streamID)
 	chunk := stream.getChunk()
 	w.metricsUpdater.updateReceiveWindow(stream.getReceiveWindow())
 	w.metricsUpdater.updateSendWindow(stream.getSendWindow())
 	if chunk.sendHeadersFrame() {
 		err := w.writeHeaders(chunk.streamID, chunk.headers)
 		if err != nil {
-			logger.Errorf("mux - write: error writing headers: %s: streamID: %d", err, stream.streamID)
+			log.Error().Msgf("mux - write: error writing headers: %s: streamID: %d", err, stream.streamID)
 			return err
 		}
-		logger.Debugf("mux - write: output headers: streamID: %d", stream.streamID)
+		log.Debug().Msgf("mux - write: output headers: streamID: %d", stream.streamID)
 	}
 
 	if chunk.sendWindowUpdateFrame() {
@@ -189,22 +189,22 @@ func (w *MuxWriter) writeStreamData(stream *MuxedStream, log *zerolog.Logger) error {
 		// window, unless the receiver treats this as a connection error"
 		err := w.f.WriteWindowUpdate(chunk.streamID, chunk.windowUpdate)
 		if err != nil {
-			logger.Errorf("mux - write: error writing window update: %s: streamID: %d", err, stream.streamID)
+			log.Error().Msgf("mux - write: error writing window update: %s: streamID: %d", err, stream.streamID)
 			return err
 		}
-		logger.Debugf("mux - write: increment receive window by %d streamID: %d", chunk.windowUpdate, stream.streamID)
+		log.Debug().Msgf("mux - write: increment receive window by %d streamID: %d", chunk.windowUpdate, stream.streamID)
 	}
 
 	for chunk.sendDataFrame() {
 		payload, sentEOF := chunk.nextDataFrame(int(w.maxFrameSize))
 		err := w.f.WriteData(chunk.streamID, sentEOF, payload)
 		if err != nil {
-			logger.Errorf("mux - write: error writing data: %s: streamID: %d", err, stream.streamID)
+			log.Error().Msgf("mux - write: error writing data: %s: streamID: %d", err, stream.streamID)
 			return err
 		}
 		// update the amount of data wrote
 		w.bytesWrote.IncrementBy(uint64(len(payload)))
-		logger.Debugf("mux - write: output data: %d: streamID: %d", len(payload), stream.streamID)
+		log.Debug().Msgf("mux - write: output data: %d: streamID: %d", len(payload), stream.streamID)
 
 		if sentEOF {
 			if stream.readBuffer.Closed() {
@@ -212,15 +212,15 @@ func (w *MuxWriter) writeStreamData(stream *MuxedStream, log *zerolog.Logger) error {
 				if !stream.gotReceiveEOF() {
 					// the peer may send data that we no longer want to receive. Force them into the
 					// closed state.
-					logger.Debugf("mux - write: resetting stream: streamID: %d", stream.streamID)
+					log.Debug().Msgf("mux - write: resetting stream: streamID: %d", stream.streamID)
 					w.f.WriteRSTStream(chunk.streamID, http2.ErrCodeNo)
 				} else {
 					// Half-open stream transitioned into closed
-					logger.Debugf("mux - write: closing stream: streamID: %d", stream.streamID)
+					log.Debug().Msgf("mux - write: closing stream: streamID: %d", stream.streamID)
 				}
 				w.streams.Delete(chunk.streamID)
 			} else {
-				logger.Debugf("mux - write: closing stream write side: streamID: %d", stream.streamID)
+				log.Debug().Msgf("mux - write: closing stream write side: streamID: %d", stream.streamID)
 			}
 		}
 	}
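The commit ports the writer's Errorf calls one-for-one to Error().Msgf, keeping the error inside the message string. zerolog also allows attaching the error and stream ID as structured fields; the helper below is a hypothetical alternative for comparison, not something this commit introduces:

// logWriteError shows the structured-field style: the error and stream ID
// become JSON fields instead of being formatted into the message.
func logWriteError(log *zerolog.Logger, streamID uint32, err error) {
	log.Error().Err(err).Uint32("streamID", streamID).Msg("mux - write: error writing headers")
}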
@ -12,8 +12,8 @@ import (
|
||||||
"os"
|
"os"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/cloudflare/cloudflared/logger"
|
|
||||||
"github.com/gorilla/websocket"
|
"github.com/gorilla/websocket"
|
||||||
|
"github.com/rs/zerolog"
|
||||||
|
|
||||||
"github.com/cloudflare/cloudflared/tlsconfig"
|
"github.com/cloudflare/cloudflared/tlsconfig"
|
||||||
)
|
)
|
||||||
|
@ -99,8 +99,8 @@ const indexTemplate = `
|
||||||
</html>
|
</html>
|
||||||
`
|
`
|
||||||
|
|
||||||
func StartHelloWorldServer(logger logger.Service, listener net.Listener, shutdownC <-chan struct{}) error {
|
func StartHelloWorldServer(log *zerolog.Logger, listener net.Listener, shutdownC <-chan struct{}) error {
|
||||||
logger.Infof("Starting Hello World server at %s", listener.Addr())
|
log.Info().Msgf("Starting Hello World server at %s", listener.Addr())
|
||||||
serverName := defaultServerName
|
serverName := defaultServerName
|
||||||
if hostname, err := os.Hostname(); err == nil {
|
if hostname, err := os.Hostname(); err == nil {
|
||||||
serverName = hostname
|
serverName = hostname
|
@@ -113,14 +113,14 @@ func StartHelloWorldServer(logger logger.Service, listener net.Listener, shutdow
    muxer := http.NewServeMux()
    muxer.HandleFunc(UptimeRoute, uptimeHandler(time.Now()))
-   muxer.HandleFunc(WSRoute, websocketHandler(logger, upgrader))
+   muxer.HandleFunc(WSRoute, websocketHandler(log, upgrader))
-   muxer.HandleFunc(SSERoute, sseHandler(logger))
+   muxer.HandleFunc(SSERoute, sseHandler(log))
    muxer.HandleFunc(HealthRoute, healthHandler())
    muxer.HandleFunc("/", rootHandler(serverName))
    httpServer := &http.Server{Addr: listener.Addr().String(), Handler: muxer}
    go func() {
        <-shutdownC
-       httpServer.Close()
+       _ = httpServer.Close()
    }()

    err := httpServer.Serve(listener)

@@ -152,13 +152,13 @@ func uptimeHandler(startTime time.Time) http.HandlerFunc {
            w.WriteHeader(http.StatusInternalServerError)
        } else {
            w.Header().Set("Content-Type", "application/json")
-           w.Write(respJson)
+           _, _ = w.Write(respJson)
        }
    }
}

// This handler will echo message
-func websocketHandler(logger logger.Service, upgrader websocket.Upgrader) http.HandlerFunc {
+func websocketHandler(log *zerolog.Logger, upgrader websocket.Upgrader) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        // This addresses the issue of r.Host includes port but origin header doesn't
        host, _, err := net.SplitHostPort(r.Host)

@@ -168,32 +168,32 @@ func websocketHandler(logger logger.Service, upgrader websocket.Upgrader) http.H
        conn, err := upgrader.Upgrade(w, r, nil)
        if err != nil {
-           logger.Errorf("failed to upgrade to websocket connection, error: %s", err)
+           log.Error().Msgf("failed to upgrade to websocket connection, error: %s", err)
            return
        }
        defer conn.Close()
        for {
            mt, message, err := conn.ReadMessage()
            if err != nil {
-               logger.Errorf("websocket read message error: %s", err)
+               log.Error().Msgf("websocket read message error: %s", err)
                break
            }

            if err := conn.WriteMessage(mt, message); err != nil {
-               logger.Errorf("websocket write message error: %s", err)
+               log.Error().Msgf("websocket write message error: %s", err)
                break
            }
        }
    }
}

-func sseHandler(logger logger.Service) http.HandlerFunc {
+func sseHandler(log *zerolog.Logger) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        w.Header().Set("Content-Type", "text/event-stream; charset=utf-8")
        flusher, ok := w.(http.Flusher)
        if !ok {
            w.WriteHeader(http.StatusInternalServerError)
-           logger.Errorf("Can't support SSE. ResponseWriter %T doesn't implement http.Flusher interface", w)
+           log.Error().Msgf("Can't support SSE. ResponseWriter %T doesn't implement http.Flusher interface", w)
            return
        }

@@ -204,7 +204,7 @@ func sseHandler(logger logger.Service) http.HandlerFunc {
                freq = parsedFreq
            }
        }
-       logger.Infof("Server Sent Events every %s", freq)
+       log.Info().Msgf("Server Sent Events every %s", freq)
        ticker := time.NewTicker(freq)
        counter := 0
        for {

@@ -247,9 +247,9 @@ func rootHandler(serverName string) http.HandlerFunc {
        })
        if err != nil {
            w.WriteHeader(http.StatusInternalServerError)
-           fmt.Fprintf(w, "error: %v", err)
+           _, _ = fmt.Fprintf(w, "error: %v", err)
        } else {
-           buffer.WriteTo(w)
+           _, _ = buffer.WriteTo(w)
        }
    }
}
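Editor's note: a minimal, self-contained sketch (not part of this commit) of the logging pattern the hunks above switch to — the zerolog logger is passed by pointer and the old logger.Errorf calls become chained log.Error().Msgf events. The handler name, address, and messages are illustrative only.

package main

import (
    "net/http"
    "os"

    "github.com/rs/zerolog"
)

// exampleHandler mirrors the handlers above: it receives a *zerolog.Logger
// and reports failures through the chained Error().Msgf API.
func exampleHandler(log *zerolog.Logger) http.HandlerFunc {
    return func(w http.ResponseWriter, r *http.Request) {
        if _, err := w.Write([]byte("hello\n")); err != nil {
            log.Error().Msgf("write error: %s", err) // was logger.Errorf(...)
        }
    }
}

func main() {
    log := zerolog.New(os.Stderr).With().Timestamp().Logger()
    http.Handle("/", exampleHandler(&log))
    log.Fatal().Err(http.ListenAndServe("127.0.0.1:8080", nil)).Msg("server exited")
}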
@@ -9,11 +9,11 @@ import (
    "strings"
    "sync"

-   "github.com/pkg/errors"
-   "github.com/urfave/cli/v2"

    "github.com/cloudflare/cloudflared/cmd/cloudflared/config"
-   "github.com/cloudflare/cloudflared/logger"
+   "github.com/pkg/errors"
+   "github.com/rs/zerolog"
+   "github.com/urfave/cli/v2"
)

var (

@@ -63,7 +63,7 @@ type Ingress struct {

// NewSingleOrigin constructs an Ingress set with only one rule, constructed from
// legacy CLI parameters like --url or --no-chunked-encoding.
-func NewSingleOrigin(c *cli.Context, allowURLFromArgs bool, logger logger.Service) (Ingress, error) {
+func NewSingleOrigin(c *cli.Context, allowURLFromArgs bool) (Ingress, error) {

    service, err := parseSingleOriginService(c, allowURLFromArgs)
    if err != nil {

@@ -113,10 +113,10 @@ func (ing Ingress) IsEmpty() bool {
}

// StartOrigins will start any origin services managed by cloudflared, e.g. proxy servers or Hello World.
-func (ing Ingress) StartOrigins(wg *sync.WaitGroup, log logger.Service, shutdownC <-chan struct{}, errC chan error) {
+func (ing Ingress) StartOrigins(wg *sync.WaitGroup, log *zerolog.Logger, shutdownC <-chan struct{}, errC chan error) {
    for _, rule := range ing.Rules {
        if err := rule.Service.start(wg, log, shutdownC, errC, rule.Config); err != nil {
-           log.Errorf("Error starting local service %s: %s", rule.Service, err)
+           log.Error().Msgf("Error starting local service %s: %s", rule.Service, err)
        }
    }
}
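Editor's note: an illustrative caller for the new StartOrigins signature. This is a sketch under assumptions — the ingress import path, channel sizes, and the WaitGroup usage are not taken from this commit.

package example

import (
    "os"
    "sync"

    "github.com/cloudflare/cloudflared/ingress" // assumed import path for the package above
    "github.com/rs/zerolog"
)

// startConfiguredOrigins shows the new contract: the logger travels as a
// *zerolog.Logger and per-rule start failures are logged rather than returned.
func startConfiguredOrigins(ing ingress.Ingress, shutdownC chan struct{}, errC chan error) {
    log := zerolog.New(os.Stderr).With().Timestamp().Logger()
    var wg sync.WaitGroup
    ing.StartOrigins(&wg, &log, shutdownC, errC)
    wg.Wait() // assumes managed origin services register themselves on the WaitGroup
}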
@@ -14,7 +14,6 @@ import (
    "gopkg.in/yaml.v2"

    "github.com/cloudflare/cloudflared/cmd/cloudflared/config"
-   "github.com/cloudflare/cloudflared/logger"
    "github.com/cloudflare/cloudflared/tlsconfig"
)

@@ -329,9 +328,8 @@ func TestSingleOriginSetsConfig(t *testing.T) {
    require.NoError(t, err)

    allowURLFromArgs := false
-   logger, err := logger.New()
    require.NoError(t, err)
-   ingress, err := NewSingleOrigin(cliCtx, allowURLFromArgs, logger)
+   ingress, err := NewSingleOrigin(cliCtx, allowURLFromArgs)
    require.NoError(t, err)

    assert.Equal(t, time.Second, ingress.Rules[0].Config.ConnectTimeout)
@@ -13,12 +13,12 @@ import (
    "time"

    "github.com/cloudflare/cloudflared/hello"
-   "github.com/cloudflare/cloudflared/logger"
    "github.com/cloudflare/cloudflared/socks"
    "github.com/cloudflare/cloudflared/tlsconfig"
    "github.com/cloudflare/cloudflared/websocket"
    gws "github.com/gorilla/websocket"
    "github.com/pkg/errors"
+   "github.com/rs/zerolog"
)

// OriginService is something a tunnel can proxy traffic to.

@@ -29,7 +29,7 @@ type OriginService interface {
    // Start the origin service if it's managed by cloudflared, e.g. proxy servers or Hello World.
    // If it's not managed by cloudflared, this is a no-op because the user is responsible for
    // starting the origin service.
-   start(wg *sync.WaitGroup, log logger.Service, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error
+   start(wg *sync.WaitGroup, log *zerolog.Logger, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error
}

// unixSocketPath is an OriginService representing a unix socket (which accepts HTTP)

@@ -42,7 +42,7 @@ func (o *unixSocketPath) String() string {
    return "unix socket: " + o.path
}

-func (o *unixSocketPath) start(wg *sync.WaitGroup, log logger.Service, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error {
+func (o *unixSocketPath) start(wg *sync.WaitGroup, log *zerolog.Logger, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error {
    transport, err := newHTTPTransport(o, cfg, log)
    if err != nil {
        return err

@@ -84,7 +84,7 @@ func (o *localService) Dial(reqURL *url.URL, headers http.Header) (*gws.Conn, *h
    return d.Dial(reqURL.String(), headers)
}

-func (o *localService) start(wg *sync.WaitGroup, log logger.Service, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error {
+func (o *localService) start(wg *sync.WaitGroup, log *zerolog.Logger, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error {
    transport, err := newHTTPTransport(o, cfg, log)
    if err != nil {
        return err

@@ -101,13 +101,13 @@ func (o *localService) start(wg *sync.WaitGroup, log logger.Service, shutdownC <
    return nil
}

-func (o *localService) startProxy(staticHost string, wg *sync.WaitGroup, log logger.Service, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error {
+func (o *localService) startProxy(staticHost string, wg *sync.WaitGroup, log *zerolog.Logger, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error {

    // Start a listener for the proxy
    proxyAddress := net.JoinHostPort(cfg.ProxyAddress, strconv.Itoa(int(cfg.ProxyPort)))
    listener, err := net.Listen("tcp", proxyAddress)
    if err != nil {
-       log.Errorf("Cannot start Websocket Proxy Server: %s", err)
+       log.Error().Msgf("Cannot start Websocket Proxy Server: %s", err)
        return errors.Wrap(err, "Cannot start Websocket Proxy Server")
    }

@@ -119,18 +119,18 @@ func (o *localService) startProxy(staticHost string, wg *sync.WaitGroup, log log
    // This origin's config specifies what type of proxy to start.
    switch cfg.ProxyType {
    case socksProxy:
-       log.Info("SOCKS5 server started")
+       log.Info().Msg("SOCKS5 server started")
        streamHandler = func(wsConn *websocket.Conn, remoteConn net.Conn, _ http.Header) {
            dialer := socks.NewConnDialer(remoteConn)
            requestHandler := socks.NewRequestHandler(dialer)
            socksServer := socks.NewConnectionHandler(requestHandler)

-           socksServer.Serve(wsConn)
+           _ = socksServer.Serve(wsConn)
        }
    case "":
-       log.Debug("Not starting any websocket proxy")
+       log.Debug().Msg("Not starting any websocket proxy")
    default:
-       log.Errorf("%s isn't a valid proxy (valid options are {%s})", cfg.ProxyType, socksProxy)
+       log.Error().Msgf("%s isn't a valid proxy (valid options are {%s})", cfg.ProxyType, socksProxy)
    }

    errC <- websocket.StartProxyServer(log, listener, staticHost, shutdownC, streamHandler)

@@ -203,7 +203,13 @@ func (o *helloWorld) String() string {
}

// Start starts a HelloWorld server and stores its address in the Service receiver.
-func (o *helloWorld) start(wg *sync.WaitGroup, log logger.Service, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error {
+func (o *helloWorld) start(
+   wg *sync.WaitGroup,
+   log *zerolog.Logger,
+   shutdownC <-chan struct{},
+   errC chan error,
+   cfg OriginRequestConfig,
+) error {
    transport, err := newHTTPTransport(o, cfg, log)
    if err != nil {
        return err

@@ -261,7 +267,13 @@ func (o *statusCode) String() string {
    return fmt.Sprintf("HTTP %d", o.resp.StatusCode)
}

-func (o *statusCode) start(wg *sync.WaitGroup, log logger.Service, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error {
+func (o *statusCode) start(
+   wg *sync.WaitGroup,
+   log *zerolog.Logger,
+   shutdownC <-chan struct{},
+   errC chan error,
+   cfg OriginRequestConfig,
+) error {
    return nil
}

@@ -280,7 +292,7 @@ func (nrc *NopReadCloser) Close() error {
    return nil
}

-func newHTTPTransport(service OriginService, cfg OriginRequestConfig, log logger.Service) (*http.Transport, error) {
+func newHTTPTransport(service OriginService, cfg OriginRequestConfig, log *zerolog.Logger) (*http.Transport, error) {
    originCertPool, err := tlsconfig.LoadOriginCA(cfg.CAPool, log)
    if err != nil {
        return nil, errors.Wrap(err, "Error loading cert pool")

@@ -338,6 +350,6 @@ func (mos MockOriginService) String() string {
    return "MockOriginService"
}

-func (mos MockOriginService) start(wg *sync.WaitGroup, log logger.Service, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error {
+func (mos MockOriginService) start(wg *sync.WaitGroup, log *zerolog.Logger, shutdownC <-chan struct{}, errC chan error, cfg OriginRequestConfig) error {
    return nil
}
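Editor's note: a sketch (not from the commit) of what an OriginService implementation looks like after the signature change — start now takes a *zerolog.Logger instead of the old logger.Service interface. The placeholder config type below stands in for OriginRequestConfig.

package example

import (
    "sync"

    "github.com/rs/zerolog"
)

// originRequestConfig is a stand-in for the real OriginRequestConfig type.
type originRequestConfig struct{}

type noopOrigin struct{}

func (noopOrigin) String() string { return "noop origin" }

// start matches the shape of the updated interface method: log through
// zerolog's chained API and return nil because nothing needs to be run.
func (noopOrigin) start(
    wg *sync.WaitGroup,
    log *zerolog.Logger,
    shutdownC <-chan struct{},
    errC chan error,
    cfg originRequestConfig,
) error {
    log.Debug().Msg("noop origin requires no managed process")
    return nil
}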
@@ -29,7 +29,7 @@ type RollingConfig struct {
}

func createDefaultConfig() Config {
-   const minLevel = "fatal"
+   const minLevel = "info"

    const RollingMaxSize = 1    // Mb
    const RollingMaxBackups = 5 // files

@@ -57,7 +57,7 @@ func createDefaultConfig() Config {
func CreateConfig(
    minLevel string,
    disableTerminal bool,
-   rollingLogPath, nonRollingLogFilePath string,
+   rollingLogPath, rollingLogFilename, nonRollingLogFilePath string,
) *Config {
    var console *ConsoleConfig
    if !disableTerminal {

@@ -71,7 +71,7 @@ func CreateConfig(

    var rolling *RollingConfig
    if rollingLogPath != "" {
-       rolling = createRollingConfig(rollingLogPath)
+       rolling = createRollingConfig(rollingLogPath, rollingLogFilename)
    }

    if minLevel == "" {

@@ -103,14 +103,14 @@ func createFileConfig(filepath string) *FileConfig {
    }
}

-func createRollingConfig(directory string) *RollingConfig {
+func createRollingConfig(directory, filename string) *RollingConfig {
    if directory == "" {
        directory = defaultConfig.RollingConfig.Directory
    }

    return &RollingConfig{
        Directory:  directory,
-       Filename:   defaultConfig.RollingConfig.Filename,
+       Filename:   filename,
        maxSize:    defaultConfig.RollingConfig.maxSize,
        maxBackups: defaultConfig.RollingConfig.maxBackups,
        maxAge:     defaultConfig.RollingConfig.maxAge,
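Editor's note: a hypothetical caller (argument values and import path assumed, not taken from the commit) showing the widened CreateConfig signature with the new rollingLogFilename parameter threaded through to createRollingConfig.

package example

import "github.com/cloudflare/cloudflared/logger"

// newRollingLoggerConfig builds a config with a caller-supplied rolled-file name,
// which previously always came from defaultConfig.RollingConfig.Filename.
func newRollingLoggerConfig() *logger.Config {
    return logger.CreateConfig(
        "info",                 // minLevel
        false,                  // disableTerminal: keep console output
        "/var/log/cloudflared", // rollingLogPath: directory for rolled files (example value)
        "cloudflared.log",      // rollingLogFilename: the new parameter (example value)
        "",                     // nonRollingLogFilePath: unused when rolling is configured
    )
}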
logger/create.go (203 changed lines)

@@ -1,13 +1,11 @@
package logger

import (
-   "fmt"
+   "io"
    "os"
-   "path/filepath"
-   "strings"
-   "time"

-   "github.com/alecthomas/units"
+   "github.com/rs/zerolog"
+   fallbacklog "github.com/rs/zerolog/log"
    "github.com/urfave/cli/v2"
)

@@ -24,162 +22,40 @@ const (
    LogSSHLevelFlag = "log-level"
)

-// Option is to encaspulate actions that will be called by Parse and run later to build an Options struct
-type Option func(*Options) error
-
-// Options is use to set logging configuration data
-type Options struct {
-   logFileDirectory        string
-   maxFileSize             units.Base2Bytes
-   maxFileCount            uint
-   terminalOutputDisabled  bool
-   supportedFileLevels     []Level
-   supportedTerminalLevels []Level
-}
-
-// DisableTerminal stops terminal output for the logger
-func DisableTerminal(disable bool) Option {
-   return func(c *Options) error {
-       c.terminalOutputDisabled = disable
-       return nil
-   }
-}
-
-// File sets a custom file to log events
-func File(path string, size units.Base2Bytes, count uint) Option {
-   return func(c *Options) error {
-       c.logFileDirectory = path
-       c.maxFileSize = size
-       c.maxFileCount = count
-       return nil
-   }
-}
-
-// DefaultFile configures the log options will the defaults
-func DefaultFile(directoryPath string) Option {
-   return func(c *Options) error {
-       size, err := units.ParseBase2Bytes("1MB")
-       if err != nil {
-           return err
-       }
-
-       c.logFileDirectory = directoryPath
-       c.maxFileSize = size
-       c.maxFileCount = 5
-       return nil
-   }
-}
-
-// SupportedFileLevels sets the supported logging levels for the log file
-func SupportedFileLevels(supported []Level) Option {
-   return func(c *Options) error {
-       c.supportedFileLevels = supported
-       return nil
-   }
-}
-
-// SupportedTerminalevels sets the supported logging levels for the terminal output
-func SupportedTerminalevels(supported []Level) Option {
-   return func(c *Options) error {
-       c.supportedTerminalLevels = supported
-       return nil
-   }
-}
-
-// LogLevelString sets the supported logging levels from a command line flag
-func LogLevelString(level string) Option {
-   return func(c *Options) error {
-       supported, err := ParseLevelString(level)
-       if err != nil {
-           return err
-       }
-       c.supportedFileLevels = supported
-       c.supportedTerminalLevels = supported
-       return nil
-   }
-}
-
-// Parse builds the Options struct so the caller knows what actions should be run
-func Parse(opts ...Option) (*Options, error) {
-   options := &Options{}
-   for _, opt := range opts {
-       if err := opt(options); err != nil {
-           return nil, err
-       }
-   }
-   return options, nil
-}
-
-// New setups a new logger based on the options.
-// The default behavior is to write to standard out
-func New(opts ...Option) (*OutputWriter, error) {
-   options, err := Parse(opts...)
-   if err != nil {
-       return nil, err
-   }
-
-   l := NewOutputWriter(SharedWriteManager)
-   if options.logFileDirectory != "" {
-       l.Add(NewFileRollingWriter(SanitizeLogPath(options.logFileDirectory),
-           "cloudflared",
-           int64(options.maxFileSize),
-           options.maxFileCount),
-           NewDefaultFormatter(time.RFC3339Nano), options.supportedFileLevels...)
-   }
-
-   if !options.terminalOutputDisabled {
-       terminalFormatter := NewTerminalFormatter(time.RFC3339)
-
-       if len(options.supportedTerminalLevels) == 0 {
-           l.Add(os.Stderr, terminalFormatter, InfoLevel, ErrorLevel, FatalLevel)
-       } else {
-           l.Add(os.Stderr, terminalFormatter, options.supportedTerminalLevels...)
-       }
-   }
-
-   return l, nil
-}
-
-func NewInHouse(loggerConfig *Config) (*OutputWriter, error) {
-   var loggerOpts []Option
-
-   var logPath string
-   if loggerConfig.FileConfig != nil {
-       logPath = loggerConfig.FileConfig.Filepath
-   }
-   if logPath == "" && loggerConfig.RollingConfig != nil {
-       logPath = loggerConfig.RollingConfig.Directory
-   }
-
-   if logPath != "" {
-       loggerOpts = append(loggerOpts, DefaultFile(logPath))
-   }
-
-   loggerOpts = append(loggerOpts, LogLevelString(loggerConfig.MinLevel))
-
-   if loggerConfig.ConsoleConfig == nil {
-       disableOption := DisableTerminal(true)
-       loggerOpts = append(loggerOpts, disableOption)
-   }
-
-   l, err := New(loggerOpts...)
-   if err != nil {
-       return nil, err
-   }
-
-   return l, nil
-}
-
-func CreateTransportLoggerFromContext(c *cli.Context, disableTerminal bool) (*OutputWriter, error) {
+func newZerolog(loggerConfig *Config) *zerolog.Logger {
+   var writers []io.Writer
+
+   if loggerConfig.ConsoleConfig != nil {
+       writers = append(writers, zerolog.ConsoleWriter{
+           Out:     os.Stderr,
+           NoColor: loggerConfig.ConsoleConfig.noColor,
+       })
+   }
+
+   // TODO TUN-3472: Support file writer and log rotation
+
+   multi := zerolog.MultiLevelWriter(writers...)
+
+   level, err := zerolog.ParseLevel(loggerConfig.MinLevel)
+   if err != nil {
+       failLog := fallbacklog.With().Logger()
+       fallbacklog.Error().Msgf("Falling back to a default logger due to logger setup failure: %s", err)
+       return &failLog
+   }
+   log := zerolog.New(multi).With().Timestamp().Logger().Level(level)
+
+   return &log
+}
+
+func CreateTransportLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger {
    return createFromContext(c, LogTransportLevelFlag, LogDirectoryFlag, disableTerminal)
}

-func CreateLoggerFromContext(c *cli.Context, disableTerminal bool) (*OutputWriter, error) {
+func CreateLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger {
    return createFromContext(c, LogLevelFlag, LogDirectoryFlag, disableTerminal)
}

-func CreateSSHLoggerFromContext(c *cli.Context, disableTerminal bool) (*OutputWriter, error) {
+func CreateSSHLoggerFromContext(c *cli.Context, disableTerminal bool) *zerolog.Logger {
    return createFromContext(c, LogSSHLevelFlag, LogSSHDirectoryFlag, disableTerminal)
}

@@ -188,37 +64,26 @@ func createFromContext(
    logLevelFlagName,
    logDirectoryFlagName string,
    disableTerminal bool,
-) (*OutputWriter, error) {
+) *zerolog.Logger {
    logLevel := c.String(logLevelFlagName)
    logFile := c.String(LogFileFlag)
    logDirectory := c.String(logDirectoryFlagName)

-   loggerConfig := CreateConfig(logLevel, disableTerminal, logDirectory, logFile)
+   loggerConfig := CreateConfig(
+       logLevel,
+       disableTerminal,
+       logDirectory,
+       defaultConfig.RollingConfig.Filename,
+       logFile,
+   )

-   return NewInHouse(loggerConfig)
+   return newZerolog(loggerConfig)
}

-// ParseLevelString returns the expected log levels based on the cmd flag
-func ParseLevelString(lvl string) ([]Level, error) {
-   switch strings.ToLower(lvl) {
-   case "fatal":
-       return []Level{FatalLevel}, nil
-   case "error":
-       return []Level{FatalLevel, ErrorLevel}, nil
-   case "info", "warn":
-       return []Level{FatalLevel, ErrorLevel, InfoLevel}, nil
-   case "debug":
-       return []Level{FatalLevel, ErrorLevel, InfoLevel, DebugLevel}, nil
-   }
-   return []Level{}, fmt.Errorf("not a valid log level: %q", lvl)
-}
-
-// SanitizeLogPath checks that the logger log path
-func SanitizeLogPath(path string) string {
-   newPath := strings.TrimSpace(path)
-   // make sure it has a log file extension and is not a directory
-   if filepath.Ext(newPath) != ".log" && !(isDirectory(newPath) || strings.HasSuffix(newPath, "/")) {
-       newPath = newPath + ".log"
-   }
-   return newPath
-}
+func Create(loggerConfig *Config) *zerolog.Logger {
+   if loggerConfig == nil {
+       loggerConfig = &defaultConfig
+   }
+
+   return newZerolog(loggerConfig)
+}
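Editor's note: a standalone sketch of the construction newZerolog performs, with the Config plumbing stripped out; the level string and writer choices here are illustrative rather than cloudflared's defaults.

package main

import (
    "os"

    "github.com/rs/zerolog"
)

func main() {
    console := zerolog.ConsoleWriter{Out: os.Stderr, NoColor: false}
    multi := zerolog.MultiLevelWriter(console) // file writers can join here once TUN-3472 lands

    level, err := zerolog.ParseLevel("info")
    if err != nil {
        level = zerolog.InfoLevel // newZerolog instead falls back to the package-level fallbacklog
    }

    log := zerolog.New(multi).With().Timestamp().Logger().Level(level)
    log.Info().Msg("logger ready")
    log.Debug().Msg("suppressed: below the configured level")
}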
@@ -1,46 +0,0 @@ (entire file deleted)
-package logger
-
-import (
-   "testing"
-
-   "github.com/stretchr/testify/assert"
-)
-
-func TestLogLevelParse(t *testing.T) {
-   lvls, err := ParseLevelString("fatal")
-   assert.NoError(t, err)
-   assert.Equal(t, []Level{FatalLevel}, lvls)
-
-   lvls, err = ParseLevelString("error")
-   assert.NoError(t, err)
-   assert.Equal(t, []Level{FatalLevel, ErrorLevel}, lvls)
-
-   lvls, err = ParseLevelString("info")
-   assert.NoError(t, err)
-   assert.Equal(t, []Level{FatalLevel, ErrorLevel, InfoLevel}, lvls)
-
-   lvls, err = ParseLevelString("info")
-   assert.NoError(t, err)
-   assert.Equal(t, []Level{FatalLevel, ErrorLevel, InfoLevel}, lvls)
-
-   lvls, err = ParseLevelString("warn")
-   assert.NoError(t, err)
-   assert.Equal(t, []Level{FatalLevel, ErrorLevel, InfoLevel}, lvls)
-
-   lvls, err = ParseLevelString("debug")
-   assert.NoError(t, err)
-   assert.Equal(t, []Level{FatalLevel, ErrorLevel, InfoLevel, DebugLevel}, lvls)
-
-   _, err = ParseLevelString("blah")
-   assert.Error(t, err)
-
-   _, err = ParseLevelString("")
-   assert.Error(t, err)
-}
-
-func TestPathSanitizer(t *testing.T) {
-   assert.Equal(t, "somebad/path/log.bat.log", SanitizeLogPath("\t somebad/path/log.bat\n\n"))
-   assert.Equal(t, "proper/path/cloudflared.log", SanitizeLogPath("proper/path/cloudflared.log"))
-   assert.Equal(t, "proper/path/", SanitizeLogPath("proper/path/"))
-   assert.Equal(t, "proper/path/cloudflared.log", SanitizeLogPath("\tproper/path/cloudflared\n\n"))
-}
@@ -1,125 +0,0 @@ (entire file deleted)
-package logger
-
-import (
-   "fmt"
-   "os"
-   "path/filepath"
-)
-
-// FileRollingWriter maintains a set of log files numbered in order
-// to keep a subset of log data to ensure it doesn't grow pass defined limits
-type FileRollingWriter struct {
-   baseFileName string
-   directory    string
-   maxFileSize  int64
-   maxFileCount uint
-   fileHandle   *os.File
-}
-
-// NewFileRollingWriter creates a new rolling file writer.
-// directory is the working directory for the files
-// baseFileName is the log file name. This writer appends .log to the name for the file name
-// maxFileSize is the size in bytes of how large each file can be. Not a hard limit, general limit based after each write
-// maxFileCount is the number of rolled files to keep.
-func NewFileRollingWriter(directory, baseFileName string, maxFileSize int64, maxFileCount uint) *FileRollingWriter {
-   return &FileRollingWriter{
-       directory:    directory,
-       baseFileName: baseFileName,
-       maxFileSize:  maxFileSize,
-       maxFileCount: maxFileCount,
-   }
-}
-
-// Write is an implementation of io.writer the rolls the file once it reaches its max size
-// It is expected the caller to Write is doing so in a thread safe manner (as WriteManager does).
-func (w *FileRollingWriter) Write(p []byte) (n int, err error) {
-   logFile, isSingleFile := buildPath(w.directory, w.baseFileName)
-   if w.fileHandle == nil {
-       h, err := os.OpenFile(logFile, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0664)
-       if err != nil {
-           return 0, err
-       }
-       w.fileHandle = h
-   }
-
-   // get size for rolling check
-   info, err := w.fileHandle.Stat()
-   if err != nil {
-       // failed to stat the file. Close the file handle and attempt to open a new handle on the next write
-       w.Close()
-       w.fileHandle = nil
-       return 0, err
-   }
-
-   // write to the file
-   written, err := w.fileHandle.Write(p)
-
-   // check if the file needs to be rolled
-   if err == nil && info.Size()+int64(written) > w.maxFileSize && !isSingleFile {
-       // close the file handle than do the renaming. A new one will be opened on the next write
-       w.Close()
-       w.rename(logFile, 1)
-   }
-
-   return written, err
-}
-
-// Close closes the file handle if it is open
-func (w *FileRollingWriter) Close() {
-   if w.fileHandle != nil {
-       w.fileHandle.Close()
-       w.fileHandle = nil
-   }
-}
-
-// rename is how the files are rolled. It works recursively to move the base log file to the rolled ones
-// e.g. cloudflared.log -> cloudflared-1.log,
-// but if cloudflared-1.log already exists, it is renamed to cloudflared-2.log,
-// then the other files move in to their postion
-func (w *FileRollingWriter) rename(sourcePath string, index uint) {
-   destinationPath, isSingleFile := buildPath(w.directory, fmt.Sprintf("%s-%d", w.baseFileName, index))
-   if isSingleFile {
-       return //don't need to rename anything, it is a single file
-   }
-
-   // rolled to the max amount of files allowed on disk
-   if index >= w.maxFileCount {
-       os.Remove(destinationPath)
-   }
-
-   // if the rolled path already exist, rename it to cloudflared-2.log, then do this one.
-   // recursive call since the oldest one needs to be renamed, before the newer ones can be moved
-   if exists(destinationPath) {
-       w.rename(destinationPath, index+1)
-   }
-
-   os.Rename(sourcePath, destinationPath)
-}
-
-// return the path to the log file and if it is a single file or not.
-// true means a single file. false means a rolled file
-func buildPath(directory, fileName string) (string, bool) {
-   if !isDirectory(directory) { // not a directory, so try and treat it as a single file for backwards compatibility sake
-       return directory, true
-   }
-   return filepath.Join(directory, fileName+".log"), false
-}
-
-func exists(filePath string) bool {
-   if _, err := os.Stat(filePath); os.IsNotExist(err) {
-       return false
-   }
-   return true
-}
-
-func isDirectory(path string) bool {
-   if path == "" {
-       return true
-   }
-
-   fileInfo, err := os.Stat(path)
-   if err != nil {
-       return false
-   }
-   return fileInfo.IsDir()
-}
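Editor's note: the rolling writer above is removed with no direct replacement — file output is deferred to TUN-3472. Until then, a plain append-only file can be handed to zerolog as an extra writer; a sketch under that assumption, with no rotation.

package main

import (
    "os"

    "github.com/rs/zerolog"
)

func main() {
    // Single append-only file: roughly what FileRollingWriter did before its
    // size limit triggered a roll.
    f, err := os.OpenFile("cloudflared.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o664)
    if err != nil {
        panic(err)
    }
    defer f.Close()

    multi := zerolog.MultiLevelWriter(zerolog.ConsoleWriter{Out: os.Stderr}, f)
    log := zerolog.New(multi).With().Timestamp().Logger()
    log.Info().Msg("written to both stderr and cloudflared.log")
}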
@@ -1,108 +0,0 @@ (entire file deleted)
-package logger
-
-import (
-   "fmt"
-   "io/ioutil"
-   "os"
-   "path/filepath"
-   "testing"
-
-   "github.com/stretchr/testify/assert"
-)
-
-func TestFileWrite(t *testing.T) {
-   fileName := "test_file"
-   fileLog := fileName + ".log"
-   testData := []byte(string("hello Dalton, how are you doing?"))
-   defer func() {
-       os.Remove(fileLog)
-   }()
-
-   w := NewFileRollingWriter("", fileName, 1000, 2)
-   defer w.Close()
-
-   l, err := w.Write(testData)
-
-   assert.NoError(t, err)
-   assert.Equal(t, l, len(testData), "expected write length and data length to match")
-
-   d, err := ioutil.ReadFile(fileLog)
-   assert.FileExists(t, fileLog, "file doesn't exist at expected path")
-   assert.Equal(t, d, testData, "expected data in file to match test data")
-}
-
-func TestRolling(t *testing.T) {
-   dirName := "testdir"
-   err := os.Mkdir(dirName, 0755)
-   assert.NoError(t, err)
-
-   fileName := "test_file"
-   firstFile := filepath.Join(dirName, fileName+".log")
-   secondFile := filepath.Join(dirName, fileName+"-1.log")
-   thirdFile := filepath.Join(dirName, fileName+"-2.log")
-
-   defer func() {
-       os.RemoveAll(dirName)
-       os.Remove(firstFile)
-       os.Remove(secondFile)
-       os.Remove(thirdFile)
-   }()
-
-   w := NewFileRollingWriter(dirName, fileName, 1000, 2)
-   defer w.Close()
-
-   for i := 99; i >= 1; i-- {
-       testData := []byte(fmt.Sprintf("%d bottles of beer on the wall...", i))
-       w.Write(testData)
-   }
-   assert.FileExists(t, firstFile, "first file doesn't exist as expected")
-   assert.FileExists(t, secondFile, "second file doesn't exist as expected")
-   assert.FileExists(t, thirdFile, "third file doesn't exist as expected")
-   assert.False(t, exists(filepath.Join(dirName, fileName+"-3.log")), "limited to two files and there is more")
-}
-
-func TestSingleFile(t *testing.T) {
-   fileName := "test_file"
-   testData := []byte(string("hello Dalton, how are you doing?"))
-   defer func() {
-       os.Remove(fileName)
-   }()
-
-   w := NewFileRollingWriter(fileName, fileName, 1000, 2)
-   defer w.Close()
-
-   l, err := w.Write(testData)
-
-   assert.NoError(t, err)
-   assert.Equal(t, l, len(testData), "expected write length and data length to match")
-
-   d, err := ioutil.ReadFile(fileName)
-   assert.FileExists(t, fileName, "file doesn't exist at expected path")
-   assert.Equal(t, d, testData, "expected data in file to match test data")
-}
-
-func TestSingleFileInDirectory(t *testing.T) {
-   dirName := "testdir"
-   err := os.Mkdir(dirName, 0755)
-   assert.NoError(t, err)
-
-   fileName := "test_file"
-   fullPath := filepath.Join(dirName, fileName+".log")
-   testData := []byte(string("hello Dalton, how are you doing?"))
-   defer func() {
-       os.Remove(fullPath)
-       os.RemoveAll(dirName)
-   }()
-
-   w := NewFileRollingWriter(fullPath, fileName, 1000, 2)
-   defer w.Close()
-
-   l, err := w.Write(testData)
-
-   assert.NoError(t, err)
-   assert.Equal(t, l, len(testData), "expected write length and data length to match")
-
-   d, err := ioutil.ReadFile(fullPath)
-   assert.FileExists(t, fullPath, "file doesn't exist at expected path")
-   assert.Equal(t, d, testData, "expected data in file to match test data")
-}
@@ -1,138 +0,0 @@ (entire file deleted)
-package logger
-
-import (
-   "fmt"
-   "runtime"
-   "time"
-
-   "github.com/acmacalister/skittles"
-)
-
-// Level of logging, lower number means more verbose logging, higher more terse
-type Level int
-
-const (
-   // DebugLevel is for messages that are intended for purposes debugging only
-   DebugLevel Level = iota
-
-   // InfoLevel is for standard log messages
-   InfoLevel
-
-   // ErrorLevel is for error message to indicate something has gone wrong
-   ErrorLevel
-
-   // FatalLevel is for error message that log and kill the program with an os.exit(1)
-   FatalLevel
-)
-
-// Formatter is the base interface for formatting logging messages before writing them out
-type Formatter interface {
-   Timestamp(Level, time.Time) string // format the timestamp string
-   Content(Level, string) string      // format content string (color for terminal, etc)
-}
-
-// DefaultFormatter writes a simple structure timestamp and the message per log line
-type DefaultFormatter struct {
-   format string
-}
-
-// NewDefaultFormatter creates the standard log formatter
-// format is the time format to use for timestamp formatting
-func NewDefaultFormatter(format string) Formatter {
-   return &DefaultFormatter{
-       format: format,
-   }
-}
-
-// Timestamp formats a log line timestamp with a brackets around them
-func (f *DefaultFormatter) Timestamp(l Level, d time.Time) string {
-   if f.format == "" {
-       return ""
-   }
-   return fmt.Sprintf("[%s]: ", d.Format(f.format))
-}
-
-// Content just writes the log line straight to the sources
-func (f *DefaultFormatter) Content(l Level, c string) string {
-   return c
-}
-
-// TerminalFormatter is setup for colored output
-type TerminalFormatter struct {
-   format        string
-   supportsColor bool
-}
-
-// UIFormatter is used for streaming logs to UI
-type UIFormatter struct {
-   format        string
-   supportsColor bool
-}
-
-// NewTerminalFormatter creates a Terminal formatter for colored output
-// format is the time format to use for timestamp formatting
-func NewTerminalFormatter(format string) Formatter {
-   supportsColor := (runtime.GOOS != "windows")
-   return &TerminalFormatter{
-       format:        format,
-       supportsColor: supportsColor,
-   }
-}
-
-func NewUIFormatter(format string) Formatter {
-   supportsColor := (runtime.GOOS != "windows")
-   return &UIFormatter{
-       format:        format,
-       supportsColor: supportsColor,
-   }
-}
-
-// Timestamp uses formatting that is tview-specific for UI
-func (f *UIFormatter) Timestamp(l Level, d time.Time) string {
-   t := ""
-   dateStr := "[" + d.Format(f.format) + "] "
-   switch l {
-   case InfoLevel:
-       t = "[#00ffff]INFO[white]"
-   case ErrorLevel:
-       t = "[red]ERROR[white]"
-   case DebugLevel:
-       t = "[yellow]DEBUG[white]"
-   case FatalLevel:
-       t = "[red]FATAL[white]"
-   }
-   return t + dateStr
-}
-
-func (f *UIFormatter) Content(l Level, c string) string {
-   return c
-}
-
-// Timestamp returns the log level with a matching color to the log type
-func (f *TerminalFormatter) Timestamp(l Level, d time.Time) string {
-   t := ""
-   dateStr := "[" + d.Format(f.format) + "] "
-   switch l {
-   case InfoLevel:
-       t = f.output("INFO", skittles.Cyan)
-   case ErrorLevel:
-       t = f.output("ERROR", skittles.Red)
-   case DebugLevel:
-       t = f.output("DEBUG", skittles.Yellow)
-   case FatalLevel:
-       t = f.output("FATAL", skittles.Red)
-   }
-   return t + dateStr
-}
-
-// Content just writes the log line straight to the sources
-func (f *TerminalFormatter) Content(l Level, c string) string {
-   return c
-}
-
-func (f *TerminalFormatter) output(msg string, colorFunc func(interface{}) string) string {
-   if f.supportsColor {
-       return colorFunc(msg)
-   }
-   return msg
-}
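Editor's note: the colored terminal output that TerminalFormatter produced is now zerolog.ConsoleWriter's job. A sketch (options assumed, not part of the commit) that keeps the old "no color on Windows" behaviour and an RFC 3339 timestamp.

package main

import (
    "os"
    "runtime"
    "time"

    "github.com/rs/zerolog"
)

func main() {
    console := zerolog.ConsoleWriter{
        Out:        os.Stderr,
        NoColor:    runtime.GOOS == "windows", // mirrors TerminalFormatter's supportsColor check
        TimeFormat: time.RFC3339,
    }
    log := zerolog.New(console).With().Timestamp().Logger()
    log.Error().Msg("rendered with a colored ERROR tag on supported terminals")
}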
@@ -1,59 +0,0 @@ (entire file deleted)
-package logger
-
-import "sync"
-
-// SharedWriteManager is a package level variable to allows multiple loggers to use the same write manager.
-// This is useful when multiple loggers will write to the same file to ensure they don't clobber each other.
-var SharedWriteManager = NewWriteManager()
-
-type writeData struct {
-   target LogOutput
-   data   []byte
-}
-
-// WriteManager is a logging service that handles managing multiple writing streams
-type WriteManager struct {
-   shutdown  chan struct{}
-   writeChan chan writeData
-   writers   map[string]Service
-   wg        sync.WaitGroup
-}
-
-// NewWriteManager creates a write manager that implements OutputManager
-func NewWriteManager() OutputManager {
-   m := &WriteManager{
-       shutdown:  make(chan struct{}),
-       writeChan: make(chan writeData, 1000),
-   }
-
-   go m.run()
-   return m
-}
-
-// Append adds a message to the writer runloop
-func (m *WriteManager) Append(data []byte, target LogOutput) {
-   m.wg.Add(1)
-   m.writeChan <- writeData{data: data, target: target}
-}
-
-// Shutdown stops the sync manager service
-func (m *WriteManager) Shutdown() {
-   m.wg.Wait()
-   close(m.shutdown)
-   close(m.writeChan)
-}
-
-// run is the main runloop that schedules log messages
-func (m *WriteManager) run() {
-   for {
-       select {
-       case event, ok := <-m.writeChan:
-           if ok {
-               event.target.WriteLogLine(event.data)
-               m.wg.Done()
-           }
-       case <-m.shutdown:
-           return
-       }
-   }
-}
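Editor's note: WriteManager serialized writes through a channel and a background goroutine. zerolog leaves concurrency to the writer; if a destination is not safe for concurrent use, it can be wrapped with zerolog.SyncWriter, which guards it with a mutex. A sketch under that assumption, not something this commit adds:

package main

import (
    "bytes"
    "fmt"

    "github.com/rs/zerolog"
)

func main() {
    var buf bytes.Buffer // bytes.Buffer is not safe for concurrent writers
    log := zerolog.New(zerolog.SyncWriter(&buf)).With().Timestamp().Logger()

    done := make(chan struct{})
    for i := 0; i < 4; i++ {
        go func(n int) {
            log.Info().Int("goroutine", n).Msg("serialized by SyncWriter")
            done <- struct{}{}
        }(i)
    }
    for i := 0; i < 4; i++ {
        <-done
    }
    fmt.Print(buf.String())
}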
@@ -1,24 +0,0 @@ (entire file deleted)
-package logger
-
-import (
-   "testing"
-
-   "github.com/stretchr/testify/assert"
-)
-
-type outputFunc func(b []byte)
-
-func (f outputFunc) WriteLogLine(data []byte) {
-   f(data)
-}
-
-func TestWriteManger(t *testing.T) {
-   testData := []byte(string("hello Austin, how are you doing?"))
-   waitChan := make(chan []byte)
-   m := NewWriteManager()
-   m.Append(testData, outputFunc(func(b []byte) {
-       waitChan <- b
-   }))
-   resp := <-waitChan
-   assert.Equal(t, testData, resp)
-}
@@ -1,18 +0,0 @@ (entire file deleted)
-package logger
-
-// MockWriteManager does nothing and is provided for testing purposes
-type MockWriteManager struct {
-}
-
-// NewMockWriteManager creates an OutputManager that does nothing for testing purposes
-func NewMockWriteManager() OutputManager {
-   return &MockWriteManager{}
-}
-
-// Append is a mock stub
-func (m *MockWriteManager) Append(data []byte, target LogOutput) {
-}
-
-// Shutdown is a mock stub
-func (m *MockWriteManager) Shutdown() {
-}
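Editor's note: tests that used NewMockWriteManager to silence logging can rely on zerolog's built-in disabled logger instead — zerolog.Nop() returns a logger that discards every event. The function under test below is hypothetical.

package example

import (
    "testing"

    "github.com/rs/zerolog"
)

func componentUnderTest(log *zerolog.Logger) {
    log.Info().Msg("this goes nowhere during tests")
}

// TestWithSilentLogger shows the test-side replacement pattern for the mock.
func TestWithSilentLogger(t *testing.T) {
    log := zerolog.Nop()
    componentUnderTest(&log)
}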
logger/output.go (157 changed lines)

@@ -1,157 +0,0 @@ (entire file deleted)
-package logger
-
-import (
-   "fmt"
-   "io"
-   "os"
-   "time"
-)
-
-// provided for testing
-var osExit = os.Exit
-
-type LogOutput interface {
-   WriteLogLine([]byte)
-}
-
-// OutputManager is used to sync data of Output
-type OutputManager interface {
-   Append([]byte, LogOutput)
-   Shutdown()
-}
-
-// Service is the logging service that is either a group or single log writer
-type Service interface {
-   Error(message string)
-   Info(message string)
-   Debug(message string)
-   Fatal(message string)
-
-   Errorf(format string, args ...interface{})
-   Infof(format string, args ...interface{})
-   Debugf(format string, args ...interface{})
-   Fatalf(format string, args ...interface{})
-
-   Add(writer io.Writer, formatter Formatter, levels ...Level)
-}
-
-type sourceGroup struct {
-   writer          io.Writer
-   formatter       Formatter
-   levelsSupported []Level
-}
-
-func (s *sourceGroup) WriteLogLine(data []byte) {
-   _, _ = s.writer.Write(data)
-}
-
-func (s *sourceGroup) supportsLevel(l Level) bool {
-   for _, level := range s.levelsSupported {
-       if l == level {
-           return true
-       }
-   }
-   return false
-}
-
-// OutputWriter is the standard logging implementation
-type OutputWriter struct {
-   groups     []*sourceGroup
-   syncWriter OutputManager
-   minLevel   Level
-}
-
-// NewOutputWriter creates a new logger
-func NewOutputWriter(syncWriter OutputManager) *OutputWriter {
-   return &OutputWriter{
-       syncWriter: syncWriter,
-       groups:     nil,
-       minLevel:   FatalLevel,
-   }
-}
-
-// Add a writer and formatter to output to
-func (s *OutputWriter) Add(writer io.Writer, formatter Formatter, levels ...Level) {
-   s.groups = append(s.groups, &sourceGroup{writer: writer, formatter: formatter, levelsSupported: levels})
-
-   // track most verbose (lowest) level we need to output
-   for _, level := range levels {
-       if level < s.minLevel {
-           s.minLevel = level
-       }
-   }
-}
-
-// Error writes an error to the logging sources
-func (s *OutputWriter) Error(message string) {
-   if s.minLevel <= ErrorLevel {
-       s.output(ErrorLevel, message)
-   }
-}
-
-// Info writes an info string to the logging sources
-func (s *OutputWriter) Info(message string) {
-   if s.minLevel <= InfoLevel {
-       s.output(InfoLevel, message)
-   }
-}
-
-// Debug writes a debug string to the logging sources
-func (s *OutputWriter) Debug(message string) {
-   if s.minLevel <= DebugLevel {
-       s.output(DebugLevel, message)
-   }
-}
-
-// Fatal writes a error string to the logging sources and runs does an os.exit()
-func (s *OutputWriter) Fatal(message string) {
-   s.output(FatalLevel, message)
-   s.syncWriter.Shutdown() // waits for the pending logging to finish
-   osExit(1)
-}
-
-// Errorf writes a formatted error to the logging sources
-func (s *OutputWriter) Errorf(format string, args ...interface{}) {
-   if s.minLevel <= ErrorLevel {
-       s.output(ErrorLevel, fmt.Sprintf(format, args...))
-   }
-}
-
-// Infof writes a formatted info statement to the logging sources
-func (s *OutputWriter) Infof(format string, args ...interface{}) {
-   if s.minLevel <= InfoLevel {
-       s.output(InfoLevel, fmt.Sprintf(format, args...))
-   }
-}
-
-// Debugf writes a formatted debug statement to the logging sources
-func (s *OutputWriter) Debugf(format string, args ...interface{}) {
-   if s.minLevel <= DebugLevel {
-       s.output(DebugLevel, fmt.Sprintf(format, args...))
-   }
-}
-
-// Fatalf writes a writes a formatted error statement and runs does an os.exit()
-func (s *OutputWriter) Fatalf(format string, args ...interface{}) {
-   s.output(FatalLevel, fmt.Sprintf(format, args...))
-   s.syncWriter.Shutdown() // waits for the pending logging to finish
-   osExit(1)
-}
-
-// output does the actual write to the sync manager
-func (s *OutputWriter) output(l Level, content string) {
-   now := time.Now()
-   for _, group := range s.groups {
-       if group.supportsLevel(l) {
-           logLine := fmt.Sprintf("%s%s\n", group.formatter.Timestamp(l, now),
-               group.formatter.Content(l, content))
-           s.syncWriter.Append([]byte(logLine), group)
-       }
-   }
-}
-
-// Write implements io.Writer to support SetOutput of the log package
-func (s *OutputWriter) Write(p []byte) (n int, err error) {
-   s.Info(string(p))
-   return len(p), nil
-}
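Editor's note: OutputWriter's minLevel gating and its osExit(1) on Fatal map directly onto zerolog — Level() drops events below a threshold before they reach any writer, and Fatal() ends the process with status 1. A small sketch:

package main

import (
    "os"

    "github.com/rs/zerolog"
)

func main() {
    // Level() plays the role of OutputWriter.minLevel.
    log := zerolog.New(os.Stderr).With().Timestamp().Logger().Level(zerolog.ErrorLevel)

    log.Debug().Msg("dropped")
    log.Info().Msg("dropped")
    log.Error().Msg("written")

    // log.Fatal().Msg("exits") // like OutputWriter.Fatal, terminates with exit code 1
}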
@@ -1,106 +0,0 @@ (entire file deleted)
-package logger
-
-import (
-   "bufio"
-   "bytes"
-   "fmt"
-   "strings"
-   "testing"
-   "time"
-
-   "github.com/stretchr/testify/assert"
-)
-
-func TestLogLevel(t *testing.T) {
-   timeFormat := "2006-01-02"
-   f := NewDefaultFormatter(timeFormat)
-   m := NewWriteManager()
-
-   var testBuffer bytes.Buffer
-   logger := NewOutputWriter(m)
-   logger.Add(&testBuffer, f, InfoLevel, DebugLevel)
-
-   testTime := f.Timestamp(InfoLevel, time.Now())
-
-   testInfo := "hello Dalton, how are you doing?"
-   logger.Info(testInfo)
-
-   tesErr := "hello Austin, how did it break today?"
-   logger.Error(tesErr)
-
-   testDebug := "hello Bill, who are you?"
-   logger.Debug(testDebug)
-
-   m.Shutdown()
-
-   lines := strings.Split(testBuffer.String(), "\n")
-   assert.Len(t, lines, 3, "only expected two strings in the buffer")
-
-   infoLine := lines[0]
-   debugLine := lines[1]
-
-   compareInfo := fmt.Sprintf("%s%s", testTime, testInfo)
-   assert.Equal(t, compareInfo, infoLine, "expect the strings to match")
-
-   compareDebug := fmt.Sprintf("%s%s", testTime, testDebug)
-   assert.Equal(t, compareDebug, debugLine, "expect the strings to match")
-}
-
-func TestOutputWrite(t *testing.T) {
-   timeFormat := "2006-01-02"
-   f := NewDefaultFormatter(timeFormat)
-   m := NewWriteManager()
-
-   var testBuffer bytes.Buffer
-   logger := NewOutputWriter(m)
-   logger.Add(&testBuffer, f, InfoLevel)
-
-   logger.Debugf("debug message not logged here")
-
-   testData := "hello Bob Bork, how are you doing?"
-   logger.Info(testData)
-   testTime := f.Timestamp(InfoLevel, time.Now())
-
-   m.Shutdown()
-
-   scanner := bufio.NewScanner(&testBuffer)
-   scanner.Scan()
-   line := scanner.Text()
-   assert.NoError(t, scanner.Err())
-
-   compareLine := fmt.Sprintf("%s%s", testTime, testData)
-   assert.Equal(t, compareLine, line, "expect the strings to match")
-}
-
-func TestFatalWrite(t *testing.T) {
-   timeFormat := "2006-01-02"
-   f := NewDefaultFormatter(timeFormat)
-   m := NewWriteManager()
-
-   var testBuffer bytes.Buffer
-   logger := NewOutputWriter(m)
-   logger.Add(&testBuffer, f, FatalLevel)
-
-   oldOsExit := osExit
-   defer func() { osExit = oldOsExit }()
-
-   var got int
-   myExit := func(code int) {
-       got = code
-   }
-
-   osExit = myExit
-
-   testData := "so long y'all"
-   logger.Fatal(testData)
-   testTime := f.Timestamp(FatalLevel, time.Now())
-
-   scanner := bufio.NewScanner(&testBuffer)
-   scanner.Scan()
-   line := scanner.Text()
-   assert.NoError(t, scanner.Err())
-
-   compareLine := fmt.Sprintf("%s%s", testTime, testData)
-   assert.Equal(t, compareLine, line, "expect the strings to match")
-   assert.Equal(t, got, 1, "exit code should be one for a fatal log")
-}
@@ -10,12 +10,12 @@ import (
 	"sync"
 	"time"
 
-	"golang.org/x/net/trace"
 
 	"github.com/cloudflare/cloudflared/connection"
-	"github.com/cloudflare/cloudflared/logger"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promhttp"
+	"github.com/rs/zerolog"
+	"golang.org/x/net/trace"
 )
 
 const (
@@ -23,12 +23,12 @@ const (
 	startupTime = time.Millisecond * 500
 )
 
-func newMetricsHandler(connectionEvents <-chan connection.Event, log logger.Service) *http.ServeMux {
+func newMetricsHandler(connectionEvents <-chan connection.Event, log *zerolog.Logger) *http.ServeMux {
 	readyServer := NewReadyServer(connectionEvents, log)
 	mux := http.NewServeMux()
 	mux.Handle("/metrics", promhttp.Handler())
 	mux.HandleFunc("/healthcheck", func(w http.ResponseWriter, r *http.Request) {
-		fmt.Fprintf(w, "OK\n")
+		_, _ = fmt.Fprintf(w, "OK\n")
 	})
 	mux.Handle("/ready", readyServer)
 	return mux
@@ -38,14 +38,14 @@ func ServeMetrics(
 	l net.Listener,
 	shutdownC <-chan struct{},
 	connectionEvents <-chan connection.Event,
-	logger logger.Service,
+	log *zerolog.Logger,
 ) (err error) {
 	var wg sync.WaitGroup
 	// Metrics port is privileged, so no need for further access control
 	trace.AuthRequest = func(*http.Request) (bool, bool) { return true, true }
 	// TODO: parameterize ReadTimeout and WriteTimeout. The maximum time we can
 	// profile CPU usage depends on WriteTimeout
-	h := newMetricsHandler(connectionEvents, logger)
+	h := newMetricsHandler(connectionEvents, log)
 	server := &http.Server{
 		ReadTimeout:  10 * time.Second,
 		WriteTimeout: 10 * time.Second,
@@ -57,22 +57,22 @@ func ServeMetrics(
 		defer wg.Done()
 		err = server.Serve(l)
 	}()
-	logger.Infof("Starting metrics server on %s", fmt.Sprintf("%v/metrics", l.Addr()))
+	log.Info().Msgf("Starting metrics server on %s", fmt.Sprintf("%v/metrics", l.Addr()))
 	// server.Serve will hang if server.Shutdown is called before the server is
 	// fully started up. So add artificial delay.
 	time.Sleep(startupTime)
 
 	<-shutdownC
 	ctx, cancel := context.WithTimeout(context.Background(), shutdownTimeout)
-	server.Shutdown(ctx)
+	_ = server.Shutdown(ctx)
 	cancel()
 
 	wg.Wait()
 	if err == http.ErrServerClosed {
-		logger.Info("Metrics server stopped")
+		log.Info().Msg("Metrics server stopped")
 		return nil
 	}
-	logger.Errorf("Metrics server quit with error: %s", err)
+	log.Error().Msgf("Metrics server quit with error: %s", err)
 	return err
 }
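The metrics hunks above show the call-site translation this commit applies throughout: printf-style helpers on the old logger.Service become zerolog event builders. A minimal sketch of that mapping, assuming a plain stderr logger (the addr value and the message strings are illustrative, not part of the diff):

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// zerolog loggers are values; cloudflared passes them around as *zerolog.Logger.
	log := zerolog.New(os.Stderr).With().Timestamp().Logger()

	// Old in-house style:  logger.Infof("Starting metrics server on %s", addr)
	//                      logger.Errorf("Metrics server quit with error: %s", err)
	// zerolog equivalents:
	addr := "127.0.0.1:2000/metrics" // illustrative value
	log.Info().Msgf("Starting metrics server on %s", addr)
	log.Error().Msgf("Metrics server quit with error: %s", "example error")
}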
@@ -7,18 +7,19 @@ import (
 	"sync"
 
 	conn "github.com/cloudflare/cloudflared/connection"
-	"github.com/cloudflare/cloudflared/logger"
+
+	"github.com/rs/zerolog"
 )
 
 // ReadyServer serves HTTP 200 if the tunnel can serve traffic. Intended for k8s readiness checks.
 type ReadyServer struct {
 	sync.RWMutex
 	isConnected map[int]bool
-	log         logger.Service
+	log         *zerolog.Logger
 }
 
 // NewReadyServer initializes a ReadyServer and starts listening for dis/connection events.
-func NewReadyServer(connectionEvents <-chan conn.Event, log logger.Service) *ReadyServer {
+func NewReadyServer(connectionEvents <-chan conn.Event, log *zerolog.Logger) *ReadyServer {
 	rs := ReadyServer{
 		isConnected: make(map[int]bool, 0),
 		log:         log,
@@ -37,7 +38,7 @@ func NewReadyServer(connectionEvents <-chan conn.Event, log logger.Service) *Rea
 		case conn.SetURL:
 			continue
 		default:
-			rs.log.Errorf("Unknown connection event case %v", c)
+			rs.log.Error().Msgf("Unknown connection event case %v", c)
 		}
 	}
 }()
@@ -59,9 +60,9 @@ func (rs *ReadyServer) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	}
 	msg, err := json.Marshal(body)
 	if err != nil {
-		fmt.Fprintf(w, `{"error": "%s"}`, err)
+		_, _ = fmt.Fprintf(w, `{"error": "%s"}`, err)
 	}
-	w.Write(msg)
+	_, _ = w.Write(msg)
 }
 
 // This is the bulk of the logic for ServeHTTP, broken into its own pure function
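The ReadyServer change is the structural half of the migration: a component keeps a log *zerolog.Logger field and its constructor accepts the pointer from the caller. A small self-contained sketch of that wiring, where Component and NewComponent are illustrative names rather than anything in the diff:

package main

import (
	"os"

	"github.com/rs/zerolog"
)

// Component mirrors the pattern above: hold a *zerolog.Logger and let the
// caller decide how the logger is constructed.
type Component struct {
	log *zerolog.Logger
}

func NewComponent(log *zerolog.Logger) *Component {
	return &Component{log: log}
}

func main() {
	log := zerolog.New(os.Stderr)
	c := NewComponent(&log)
	c.log.Error().Msgf("Unknown connection event case %v", "example")
}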
@@ -13,10 +13,11 @@ import (
 	"github.com/cloudflare/cloudflared/buffer"
 	"github.com/cloudflare/cloudflared/connection"
 	"github.com/cloudflare/cloudflared/ingress"
-	"github.com/cloudflare/cloudflared/logger"
 	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 	"github.com/cloudflare/cloudflared/websocket"
 
 	"github.com/pkg/errors"
+	"github.com/rs/zerolog"
 )
 
 const (
@@ -26,15 +27,15 @@ const (
 type client struct {
 	ingressRules ingress.Ingress
 	tags         []tunnelpogs.Tag
-	logger       logger.Service
+	log          *zerolog.Logger
 	bufferPool   *buffer.Pool
 }
 
-func NewClient(ingressRules ingress.Ingress, tags []tunnelpogs.Tag, logger logger.Service) connection.OriginClient {
+func NewClient(ingressRules ingress.Ingress, tags []tunnelpogs.Tag, log *zerolog.Logger) connection.OriginClient {
 	return &client{
 		ingressRules: ingressRules,
 		tags:         tags,
-		logger:       logger,
+		log:          log,
 		bufferPool:   buffer.NewPool(512 * 1024),
 	}
 }
@@ -97,14 +98,14 @@ func (c *client) proxyHTTP(w connection.ResponseWriter, req *http.Request, rule
 		return nil, errors.Wrap(err, "Error writing response header")
 	}
 	if connection.IsServerSentEvent(resp.Header) {
-		c.logger.Debug("Detected Server-Side Events from Origin")
+		c.log.Debug().Msg("Detected Server-Side Events from Origin")
 		c.writeEventStream(w, resp.Body)
 	} else {
 		// Use CopyBuffer, because Copy only allocates a 32KiB buffer, and cross-stream
 		// compression generates dictionary on first write
 		buf := c.bufferPool.Get()
 		defer c.bufferPool.Put(buf)
-		io.CopyBuffer(w, resp.Body, buf)
+		_, _ = io.CopyBuffer(w, resp.Body, buf)
 	}
 	return resp, nil
 }
@@ -129,7 +130,7 @@ func (c *client) proxyWebsocket(w connection.ResponseWriter, req *http.Request,
 	go func() {
 		// serveCtx is done if req is cancelled, or streamWebsocket returns
 		<-serveCtx.Done()
-		conn.Close()
+		_ = conn.Close()
 		close(connClosedChan)
 	}()
 
@@ -159,7 +160,7 @@ func (c *client) writeEventStream(w connection.ResponseWriter, respBody io.ReadC
 		if err != nil {
 			break
 		}
-		w.Write(line)
+		_, _ = w.Write(line)
 	}
 }
 
@@ -171,46 +172,46 @@ func (c *client) appendTagHeaders(r *http.Request) {
 
 func (c *client) logRequest(r *http.Request, cfRay string, lbProbe bool, ruleNum int) {
 	if cfRay != "" {
-		c.logger.Debugf("CF-RAY: %s %s %s %s", cfRay, r.Method, r.URL, r.Proto)
+		c.log.Debug().Msgf("CF-RAY: %s %s %s %s", cfRay, r.Method, r.URL, r.Proto)
 	} else if lbProbe {
-		c.logger.Debugf("CF-RAY: %s Load Balancer health check %s %s %s", cfRay, r.Method, r.URL, r.Proto)
+		c.log.Debug().Msgf("CF-RAY: %s Load Balancer health check %s %s %s", cfRay, r.Method, r.URL, r.Proto)
 	} else {
-		c.logger.Debugf("All requests should have a CF-RAY header. Please open a support ticket with Cloudflare. %s %s %s ", r.Method, r.URL, r.Proto)
+		c.log.Debug().Msgf("All requests should have a CF-RAY header. Please open a support ticket with Cloudflare. %s %s %s ", r.Method, r.URL, r.Proto)
 	}
-	c.logger.Debugf("CF-RAY: %s Request Headers %+v", cfRay, r.Header)
-	c.logger.Debugf("CF-RAY: %s Serving with ingress rule %d", cfRay, ruleNum)
+	c.log.Debug().Msgf("CF-RAY: %s Request Headers %+v", cfRay, r.Header)
+	c.log.Debug().Msgf("CF-RAY: %s Serving with ingress rule %d", cfRay, ruleNum)
 
 	if contentLen := r.ContentLength; contentLen == -1 {
-		c.logger.Debugf("CF-RAY: %s Request Content length unknown", cfRay)
+		c.log.Debug().Msgf("CF-RAY: %s Request Content length unknown", cfRay)
 	} else {
-		c.logger.Debugf("CF-RAY: %s Request content length %d", cfRay, contentLen)
+		c.log.Debug().Msgf("CF-RAY: %s Request content length %d", cfRay, contentLen)
 	}
 }
 
 func (c *client) logOriginResponse(r *http.Response, cfRay string, lbProbe bool, ruleNum int) {
 	responseByCode.WithLabelValues(strconv.Itoa(r.StatusCode)).Inc()
 	if cfRay != "" {
-		c.logger.Debugf("CF-RAY: %s Status: %s served by ingress %d", cfRay, r.Status, ruleNum)
+		c.log.Info().Msgf("CF-RAY: %s Status: %s served by ingress %d", cfRay, r.Status, ruleNum)
 	} else if lbProbe {
-		c.logger.Debugf("Response to Load Balancer health check %s", r.Status)
+		c.log.Debug().Msgf("Response to Load Balancer health check %s", r.Status)
 	} else {
-		c.logger.Debugf("Status: %s served by ingress %d", r.Status, ruleNum)
+		c.log.Debug().Msgf("Status: %s served by ingress %d", r.Status, ruleNum)
 	}
-	c.logger.Debugf("CF-RAY: %s Response Headers %+v", cfRay, r.Header)
+	c.log.Debug().Msgf("CF-RAY: %s Response Headers %+v", cfRay, r.Header)
 
 	if contentLen := r.ContentLength; contentLen == -1 {
-		c.logger.Debugf("CF-RAY: %s Response content length unknown", cfRay)
+		c.log.Debug().Msgf("CF-RAY: %s Response content length unknown", cfRay)
 	} else {
-		c.logger.Debugf("CF-RAY: %s Response content length %d", cfRay, contentLen)
+		c.log.Debug().Msgf("CF-RAY: %s Response content length %d", cfRay, contentLen)
 	}
 }
 
 func (c *client) logRequestError(err error, cfRay string, ruleNum int) {
 	requestErrors.Inc()
 	if cfRay != "" {
-		c.logger.Errorf("CF-RAY: %s Proxying to ingress %d error: %v", cfRay, ruleNum, err)
+		c.log.Error().Msgf("CF-RAY: %s Proxying to ingress %d error: %v", cfRay, ruleNum, err)
 	} else {
-		c.logger.Errorf("Proxying to ingress %d error: %v", ruleNum, err)
+		c.log.Error().Msgf("Proxying to ingress %d error: %v", ruleNum, err)
 	}
 
 }
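One detail in logOriginResponse above: the per-request status line is promoted from Debugf to Info(), while the other CF-RAY details stay at debug level. zerolog makes that split cheap because events below the logger's level are disabled up front. A hedged sketch of the level gating, with placeholder ray and status values:

package main

import (
	"os"

	"github.com/rs/zerolog"
)

func main() {
	// Events below Info are dropped; Debug() returns a disabled event.
	log := zerolog.New(os.Stderr).Level(zerolog.InfoLevel)

	log.Debug().Msgf("CF-RAY: %s Response Headers %+v", "example-ray", nil) // not written
	log.Info().Msgf("CF-RAY: %s Status: %s served by ingress %d", "example-ray", "200 OK", 0)
}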
@@ -16,11 +16,11 @@ import (
 	"github.com/cloudflare/cloudflared/connection"
 	"github.com/cloudflare/cloudflared/hello"
 	"github.com/cloudflare/cloudflared/ingress"
-	"github.com/cloudflare/cloudflared/logger"
 	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 	"github.com/urfave/cli/v2"
 
 	"github.com/gobwas/ws/wsutil"
+	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )
@@ -49,7 +49,7 @@ func (w *mockHTTPRespWriter) WriteRespHeaders(resp *http.Response) error {
 
 func (w *mockHTTPRespWriter) WriteErrorResponse() {
 	w.WriteHeader(http.StatusBadGateway)
-	w.Write([]byte("http response error"))
+	_, _ = w.Write([]byte("http response error"))
 }
 
 func (w *mockHTTPRespWriter) Read(data []byte) (int, error) {
@@ -106,8 +106,7 @@ func (w *mockSSERespWriter) ReadBytes() []byte {
 }
 
 func TestProxySingleOrigin(t *testing.T) {
-	logger, err := logger.New()
-	require.NoError(t, err)
+	log := zerolog.Nop()
 
 	ctx, cancel := context.WithCancel(context.Background())
 
@@ -115,18 +114,18 @@ func TestProxySingleOrigin(t *testing.T) {
 	flagSet.Bool("hello-world", true, "")
 
 	cliCtx := cli.NewContext(cli.NewApp(), flagSet, nil)
-	err = cliCtx.Set("hello-world", "true")
+	err := cliCtx.Set("hello-world", "true")
 	require.NoError(t, err)
 
 	allowURLFromArgs := false
-	ingressRule, err := ingress.NewSingleOrigin(cliCtx, allowURLFromArgs, logger)
+	ingressRule, err := ingress.NewSingleOrigin(cliCtx, allowURLFromArgs)
 	require.NoError(t, err)
 
 	var wg sync.WaitGroup
 	errC := make(chan error)
-	ingressRule.StartOrigins(&wg, logger, ctx.Done(), errC)
+	ingressRule.StartOrigins(&wg, &log, ctx.Done(), errC)
 
-	client := NewClient(ingressRule, testTags, logger)
+	client := NewClient(ingressRule, testTags, &log)
 	t.Run("testProxyHTTP", testProxyHTTP(t, client))
 	t.Run("testProxyWebsocket", testProxyWebsocket(t, client))
 	t.Run("testProxySSE", testProxySSE(t, client))
@@ -191,7 +190,7 @@ func testProxySSE(t *testing.T, client connection.OriginClient) func(t *testing.
 	return func(t *testing.T) {
 		var (
 			pushCount = 50
-			pushFreq  = time.Duration(time.Millisecond * 10)
+			pushFreq  = time.Millisecond * 10
 		)
 		respWriter := newMockSSERespWriter()
 		ctx, cancel := context.WithCancel(context.Background())
@@ -252,15 +251,14 @@ func TestProxyMultipleOrigins(t *testing.T) {
 	})
 	require.NoError(t, err)
 
-	logger, err := logger.New()
-	require.NoError(t, err)
+	log := zerolog.Nop()
 
 	ctx, cancel := context.WithCancel(context.Background())
 	errC := make(chan error)
 	var wg sync.WaitGroup
-	ingress.StartOrigins(&wg, logger, ctx.Done(), errC)
+	ingress.StartOrigins(&wg, &log, ctx.Done(), errC)
 
-	client := NewClient(ingress, testTags, logger)
+	client := NewClient(ingress, testTags, &log)
 
 	tests := []struct {
 		url string
@@ -314,7 +312,7 @@ type mockAPI struct{}
 
 func (ma mockAPI) ServeHTTP(w http.ResponseWriter, r *http.Request) {
 	w.WriteHeader(http.StatusCreated)
-	w.Write([]byte("Created"))
+	_, _ = w.Write([]byte("Created"))
 }
 
 type errorOriginTransport struct{}
@@ -336,10 +334,9 @@ func TestProxyError(t *testing.T) {
 		},
 	}
 
-	logger, err := logger.New()
-	require.NoError(t, err)
+	log := zerolog.Nop()
 
-	client := NewClient(ingress, testTags, logger)
+	client := NewClient(ingress, testTags, &log)
 
 	respWriter := newMockHTTPRespWriter()
 	req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1", nil)
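In the tests, the old logger.New() call and its error check collapse into zerolog.Nop(), a disabled logger that needs no cleanup and produces no output. A sketch of the same pattern, where TestSomething and doWork are illustrative names only:

package example

import (
	"testing"

	"github.com/rs/zerolog"
)

// doWork is an illustrative function that, like NewClient above, takes the
// logger by pointer.
func doWork(log *zerolog.Logger) {
	log.Debug().Msg("this event is discarded by a Nop logger")
}

func TestSomething(t *testing.T) {
	log := zerolog.Nop() // disabled logger; no error path to handle
	doWork(&log)
}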
@ -6,14 +6,14 @@ import (
|
||||||
"net"
|
"net"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/google/uuid"
|
|
||||||
|
|
||||||
"github.com/cloudflare/cloudflared/connection"
|
"github.com/cloudflare/cloudflared/connection"
|
||||||
"github.com/cloudflare/cloudflared/edgediscovery"
|
"github.com/cloudflare/cloudflared/edgediscovery"
|
||||||
"github.com/cloudflare/cloudflared/h2mux"
|
"github.com/cloudflare/cloudflared/h2mux"
|
||||||
"github.com/cloudflare/cloudflared/logger"
|
|
||||||
"github.com/cloudflare/cloudflared/signal"
|
"github.com/cloudflare/cloudflared/signal"
|
||||||
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
|
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
|
||||||
|
|
||||||
|
"github.com/google/uuid"
|
||||||
|
"github.com/rs/zerolog"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
@ -50,7 +50,7 @@ type Supervisor struct {
|
||||||
nextConnectedIndex int
|
nextConnectedIndex int
|
||||||
nextConnectedSignal chan struct{}
|
nextConnectedSignal chan struct{}
|
||||||
|
|
||||||
logger logger.Service
|
log *zerolog.Logger
|
||||||
|
|
||||||
reconnectCredentialManager *reconnectCredentialManager
|
reconnectCredentialManager *reconnectCredentialManager
|
||||||
useReconnectToken bool
|
useReconnectToken bool
|
||||||
|
@ -68,9 +68,9 @@ func NewSupervisor(config *TunnelConfig, cloudflaredUUID uuid.UUID) (*Supervisor
|
||||||
err error
|
err error
|
||||||
)
|
)
|
||||||
if len(config.EdgeAddrs) > 0 {
|
if len(config.EdgeAddrs) > 0 {
|
||||||
edgeIPs, err = edgediscovery.StaticEdge(config.Logger, config.EdgeAddrs)
|
edgeIPs, err = edgediscovery.StaticEdge(config.Log, config.EdgeAddrs)
|
||||||
} else {
|
} else {
|
||||||
edgeIPs, err = edgediscovery.ResolveEdge(config.Logger)
|
edgeIPs, err = edgediscovery.ResolveEdge(config.Log)
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -87,7 +87,7 @@ func NewSupervisor(config *TunnelConfig, cloudflaredUUID uuid.UUID) (*Supervisor
|
||||||
edgeIPs: edgeIPs,
|
edgeIPs: edgeIPs,
|
||||||
tunnelErrors: make(chan tunnelError),
|
tunnelErrors: make(chan tunnelError),
|
||||||
tunnelsConnecting: map[int]chan struct{}{},
|
tunnelsConnecting: map[int]chan struct{}{},
|
||||||
logger: config.Logger,
|
log: config.Log,
|
||||||
reconnectCredentialManager: newReconnectCredentialManager(connection.MetricsNamespace, connection.TunnelSubsystem, config.HAConnections),
|
reconnectCredentialManager: newReconnectCredentialManager(connection.MetricsNamespace, connection.TunnelSubsystem, config.HAConnections),
|
||||||
useReconnectToken: useReconnectToken,
|
useReconnectToken: useReconnectToken,
|
||||||
}, nil
|
}, nil
|
||||||
|
@ -110,7 +110,7 @@ func (s *Supervisor) Run(ctx context.Context, connectedSignal *signal.Signal, re
|
||||||
if timer, err := s.reconnectCredentialManager.RefreshAuth(ctx, refreshAuthBackoff, s.authenticate); err == nil {
|
if timer, err := s.reconnectCredentialManager.RefreshAuth(ctx, refreshAuthBackoff, s.authenticate); err == nil {
|
||||||
refreshAuthBackoffTimer = timer
|
refreshAuthBackoffTimer = timer
|
||||||
} else {
|
} else {
|
||||||
s.logger.Errorf("supervisor: initial refreshAuth failed, retrying in %v: %s", refreshAuthRetryDuration, err)
|
s.log.Error().Msgf("supervisor: initial refreshAuth failed, retrying in %v: %s", refreshAuthRetryDuration, err)
|
||||||
refreshAuthBackoffTimer = time.After(refreshAuthRetryDuration)
|
refreshAuthBackoffTimer = time.After(refreshAuthRetryDuration)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -129,7 +129,7 @@ func (s *Supervisor) Run(ctx context.Context, connectedSignal *signal.Signal, re
|
||||||
case tunnelError := <-s.tunnelErrors:
|
case tunnelError := <-s.tunnelErrors:
|
||||||
tunnelsActive--
|
tunnelsActive--
|
||||||
if tunnelError.err != nil {
|
if tunnelError.err != nil {
|
||||||
s.logger.Infof("supervisor: Tunnel disconnected due to error: %s", tunnelError.err)
|
s.log.Info().Msgf("supervisor: Tunnel disconnected due to error: %s", tunnelError.err)
|
||||||
tunnelsWaiting = append(tunnelsWaiting, tunnelError.index)
|
tunnelsWaiting = append(tunnelsWaiting, tunnelError.index)
|
||||||
s.waitForNextTunnel(tunnelError.index)
|
s.waitForNextTunnel(tunnelError.index)
|
||||||
|
|
||||||
|
@ -152,7 +152,7 @@ func (s *Supervisor) Run(ctx context.Context, connectedSignal *signal.Signal, re
|
||||||
case <-refreshAuthBackoffTimer:
|
case <-refreshAuthBackoffTimer:
|
||||||
newTimer, err := s.reconnectCredentialManager.RefreshAuth(ctx, refreshAuthBackoff, s.authenticate)
|
newTimer, err := s.reconnectCredentialManager.RefreshAuth(ctx, refreshAuthBackoff, s.authenticate)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
s.logger.Errorf("supervisor: Authentication failed: %s", err)
|
s.log.Error().Msgf("supervisor: Authentication failed: %s", err)
|
||||||
// Permanent failure. Leave the `select` without setting the
|
// Permanent failure. Leave the `select` without setting the
|
||||||
// channel to be non-null, so we'll never hit this case of the `select` again.
|
// channel to be non-null, so we'll never hit this case of the `select` again.
|
||||||
continue
|
continue
|
||||||
|
@ -172,7 +172,7 @@ func (s *Supervisor) Run(ctx context.Context, connectedSignal *signal.Signal, re
|
||||||
func (s *Supervisor) initialize(ctx context.Context, connectedSignal *signal.Signal, reconnectCh chan ReconnectSignal) error {
|
func (s *Supervisor) initialize(ctx context.Context, connectedSignal *signal.Signal, reconnectCh chan ReconnectSignal) error {
|
||||||
availableAddrs := int(s.edgeIPs.AvailableAddrs())
|
availableAddrs := int(s.edgeIPs.AvailableAddrs())
|
||||||
if s.config.HAConnections > availableAddrs {
|
if s.config.HAConnections > availableAddrs {
|
||||||
s.logger.Infof("You requested %d HA connections but I can give you at most %d.", s.config.HAConnections, availableAddrs)
|
s.log.Info().Msgf("You requested %d HA connections but I can give you at most %d.", s.config.HAConnections, availableAddrs)
|
||||||
s.config.HAConnections = availableAddrs
|
s.config.HAConnections = availableAddrs
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -295,7 +295,7 @@ func (s *Supervisor) authenticate(ctx context.Context, numPreviousAttempts int)
|
||||||
// This callback is invoked by h2mux when the edge initiates a stream.
|
// This callback is invoked by h2mux when the edge initiates a stream.
|
||||||
return nil // noop
|
return nil // noop
|
||||||
})
|
})
|
||||||
muxerConfig := s.config.MuxerConfig.H2MuxerConfig(handler, s.logger)
|
muxerConfig := s.config.MuxerConfig.H2MuxerConfig(handler, s.log)
|
||||||
muxer, err := h2mux.Handshake(edgeConn, edgeConn, *muxerConfig, h2mux.ActiveStreams)
|
muxer, err := h2mux.Handshake(edgeConn, edgeConn, *muxerConfig, h2mux.ActiveStreams)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
|
@ -311,7 +311,7 @@ func (s *Supervisor) authenticate(ctx context.Context, numPreviousAttempts int)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
rpcClient := connection.NewTunnelServerClient(ctx, stream, s.logger)
|
rpcClient := connection.NewTunnelServerClient(ctx, stream, s.log)
|
||||||
defer rpcClient.Close()
|
defer rpcClient.Close()
|
||||||
|
|
||||||
const arbitraryConnectionID = uint8(0)
|
const arbitraryConnectionID = uint8(0)
|
||||||
|
|
|
@ -13,13 +13,13 @@ import (
|
||||||
"github.com/google/uuid"
|
"github.com/google/uuid"
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
"github.com/prometheus/client_golang/prometheus"
|
"github.com/prometheus/client_golang/prometheus"
|
||||||
|
"github.com/rs/zerolog"
|
||||||
"golang.org/x/sync/errgroup"
|
"golang.org/x/sync/errgroup"
|
||||||
|
|
||||||
"github.com/cloudflare/cloudflared/cmd/cloudflared/buildinfo"
|
"github.com/cloudflare/cloudflared/cmd/cloudflared/buildinfo"
|
||||||
"github.com/cloudflare/cloudflared/connection"
|
"github.com/cloudflare/cloudflared/connection"
|
||||||
"github.com/cloudflare/cloudflared/edgediscovery"
|
"github.com/cloudflare/cloudflared/edgediscovery"
|
||||||
"github.com/cloudflare/cloudflared/h2mux"
|
"github.com/cloudflare/cloudflared/h2mux"
|
||||||
"github.com/cloudflare/cloudflared/logger"
|
|
||||||
"github.com/cloudflare/cloudflared/signal"
|
"github.com/cloudflare/cloudflared/signal"
|
||||||
"github.com/cloudflare/cloudflared/tunnelrpc"
|
"github.com/cloudflare/cloudflared/tunnelrpc"
|
||||||
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
|
tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
|
||||||
|
@ -55,7 +55,7 @@ type TunnelConfig struct {
|
||||||
IsFreeTunnel bool
|
IsFreeTunnel bool
|
||||||
LBPool string
|
LBPool string
|
||||||
Tags []tunnelpogs.Tag
|
Tags []tunnelpogs.Tag
|
||||||
Logger logger.Service
|
Log *zerolog.Logger
|
||||||
Observer *connection.Observer
|
Observer *connection.Observer
|
||||||
ReportedVersion string
|
ReportedVersion string
|
||||||
Retries uint
|
Retries uint
|
||||||
|
@ -235,7 +235,7 @@ func waitForBackoff(
|
||||||
}
|
}
|
||||||
|
|
||||||
config.Observer.SendReconnect(connIndex)
|
config.Observer.SendReconnect(connIndex)
|
||||||
config.Logger.Infof("Retrying connection %d in %s seconds, error %v", connIndex, duration, err)
|
config.Log.Info().Msgf("Retrying connection %d in %s seconds, error %v", connIndex, duration, err)
|
||||||
protobackoff.Backoff(ctx)
|
protobackoff.Backoff(ctx)
|
||||||
|
|
||||||
if protobackoff.ReachedMaxRetries() {
|
if protobackoff.ReachedMaxRetries() {
|
||||||
|
@ -247,13 +247,13 @@ func waitForBackoff(
|
||||||
if protobackoff.protocol == fallback {
|
if protobackoff.protocol == fallback {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
config.Logger.Infof("Fallback to use %s", fallback)
|
config.Log.Info().Msgf("Fallback to use %s", fallback)
|
||||||
protobackoff.fallback(fallback)
|
protobackoff.fallback(fallback)
|
||||||
} else if !protobackoff.inFallback {
|
} else if !protobackoff.inFallback {
|
||||||
current := config.ProtocolSelector.Current()
|
current := config.ProtocolSelector.Current()
|
||||||
if protobackoff.protocol != current {
|
if protobackoff.protocol != current {
|
||||||
protobackoff.protocol = current
|
protobackoff.protocol = current
|
||||||
config.Logger.Infof("Change protocol to %s", current)
|
config.Log.Info().Msgf("Change protocol to %s", current)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
|
@ -311,9 +311,16 @@ func ServeH2mux(
|
||||||
cloudflaredUUID uuid.UUID,
|
cloudflaredUUID uuid.UUID,
|
||||||
reconnectCh chan ReconnectSignal,
|
reconnectCh chan ReconnectSignal,
|
||||||
) (err error, recoverable bool) {
|
) (err error, recoverable bool) {
|
||||||
config.Logger.Debugf("Connecting via h2mux")
|
config.Log.Debug().Msgf("Connecting via h2mux")
|
||||||
// Returns error from parsing the origin URL or handshake errors
|
// Returns error from parsing the origin URL or handshake errors
|
||||||
handler, err, recoverable := connection.NewH2muxConnection(ctx, config.ConnectionConfig, config.MuxerConfig, edgeConn, connectionIndex, config.Observer)
|
handler, err, recoverable := connection.NewH2muxConnection(
|
||||||
|
ctx,
|
||||||
|
config.ConnectionConfig,
|
||||||
|
config.MuxerConfig,
|
||||||
|
edgeConn,
|
||||||
|
connectionIndex,
|
||||||
|
config.Observer,
|
||||||
|
)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err, recoverable
|
return err, recoverable
|
||||||
}
|
}
|
||||||
|
@ -338,29 +345,29 @@ func ServeH2mux(
|
||||||
// don't retry this connection anymore, let supervisor pick new a address
|
// don't retry this connection anymore, let supervisor pick new a address
|
||||||
return err, false
|
return err, false
|
||||||
case *serverRegisterTunnelError:
|
case *serverRegisterTunnelError:
|
||||||
config.Logger.Errorf("Register tunnel error from server side: %s", err.cause)
|
config.Log.Error().Msgf("Register tunnel error from server side: %s", err.cause)
|
||||||
// Don't send registration error return from server to Sentry. They are
|
// Don't send registration error return from server to Sentry. They are
|
||||||
// logged on server side
|
// logged on server side
|
||||||
if incidents := config.IncidentLookup.ActiveIncidents(); len(incidents) > 0 {
|
if incidents := config.IncidentLookup.ActiveIncidents(); len(incidents) > 0 {
|
||||||
config.Logger.Error(activeIncidentsMsg(incidents))
|
config.Log.Error().Msg(activeIncidentsMsg(incidents))
|
||||||
}
|
}
|
||||||
return err.cause, !err.permanent
|
return err.cause, !err.permanent
|
||||||
case *clientRegisterTunnelError:
|
case *clientRegisterTunnelError:
|
||||||
config.Logger.Errorf("Register tunnel error on client side: %s", err.cause)
|
config.Log.Error().Msgf("Register tunnel error on client side: %s", err.cause)
|
||||||
return err, true
|
return err, true
|
||||||
case *muxerShutdownError:
|
case *muxerShutdownError:
|
||||||
config.Logger.Info("Muxer shutdown")
|
config.Log.Info().Msg("Muxer shutdown")
|
||||||
return err, true
|
return err, true
|
||||||
case *ReconnectSignal:
|
case *ReconnectSignal:
|
||||||
config.Logger.Infof("Restarting connection %d due to reconnect signal in %s", connectionIndex, err.Delay)
|
config.Log.Info().Msgf("Restarting connection %d due to reconnect signal in %s", connectionIndex, err.Delay)
|
||||||
err.DelayBeforeReconnect()
|
err.DelayBeforeReconnect()
|
||||||
return err, true
|
return err, true
|
||||||
default:
|
default:
|
||||||
if err == context.Canceled {
|
if err == context.Canceled {
|
||||||
config.Logger.Debugf("Serve tunnel error: %s", err)
|
config.Log.Debug().Msgf("Serve tunnel error: %s", err)
|
||||||
return err, false
|
return err, false
|
||||||
}
|
}
|
||||||
config.Logger.Errorf("Serve tunnel error: %s", err)
|
config.Log.Error().Msgf("Serve tunnel error: %s", err)
|
||||||
return err, true
|
return err, true
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
@ -376,8 +383,16 @@ func ServeHTTP2(
|
||||||
connectedFuse connection.ConnectedFuse,
|
connectedFuse connection.ConnectedFuse,
|
||||||
reconnectCh chan ReconnectSignal,
|
reconnectCh chan ReconnectSignal,
|
||||||
) (err error, recoverable bool) {
|
) (err error, recoverable bool) {
|
||||||
config.Logger.Debugf("Connecting via http2")
|
config.Log.Debug().Msgf("Connecting via http2")
|
||||||
server := connection.NewHTTP2Connection(tlsServerConn, config.ConnectionConfig, config.NamedTunnel, connOptions, config.Observer, connIndex, connectedFuse)
|
server := connection.NewHTTP2Connection(
|
||||||
|
tlsServerConn,
|
||||||
|
config.ConnectionConfig,
|
||||||
|
config.NamedTunnel,
|
||||||
|
connOptions,
|
||||||
|
config.Observer,
|
||||||
|
connIndex,
|
||||||
|
connectedFuse,
|
||||||
|
)
|
||||||
|
|
||||||
errGroup, serveCtx := errgroup.WithContext(ctx)
|
errGroup, serveCtx := errgroup.WithContext(ctx)
|
||||||
errGroup.Go(func() error {
|
errGroup.Go(func() error {
|
||||||
|
|
|
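TunnelConfig now exposes Log *zerolog.Logger, and the supervisor, retry/backoff, and ServeH2mux/ServeHTTP2 paths all log through it. How that logger is constructed is not part of these hunks; the sketch below assumes a console writer purely for illustration, and tunnelConfig is a stand-in for origin.TunnelConfig:

package main

import (
	"os"
	"time"

	"github.com/rs/zerolog"
)

// tunnelConfig stands in for origin.TunnelConfig, which now carries a
// Log *zerolog.Logger field instead of the old logger.Service.
type tunnelConfig struct {
	Log *zerolog.Logger
}

func main() {
	// A human-readable console writer; cloudflared's real writer setup lives elsewhere.
	log := zerolog.New(zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: time.RFC3339}).
		With().Timestamp().Logger()

	config := &tunnelConfig{Log: &log}
	config.Log.Info().Msg("tunnel logger wired up")
}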
@ -7,7 +7,8 @@ import (
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/cloudflare/cloudflared/connection"
|
"github.com/cloudflare/cloudflared/connection"
|
||||||
"github.com/cloudflare/cloudflared/logger"
|
|
||||||
|
"github.com/rs/zerolog"
|
||||||
"github.com/stretchr/testify/assert"
|
"github.com/stretchr/testify/assert"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -31,8 +32,7 @@ func TestWaitForBackoffFallback(t *testing.T) {
|
||||||
BaseTime: time.Millisecond * 10,
|
BaseTime: time.Millisecond * 10,
|
||||||
}
|
}
|
||||||
ctx := context.Background()
|
ctx := context.Background()
|
||||||
logger, err := logger.New()
|
log := zerolog.Nop()
|
||||||
assert.NoError(t, err)
|
|
||||||
resolveTTL := time.Duration(0)
|
resolveTTL := time.Duration(0)
|
||||||
namedTunnel := &connection.NamedTunnelConfig{
|
namedTunnel := &connection.NamedTunnelConfig{
|
||||||
Credentials: connection.Credentials{
|
Credentials: connection.Credentials{
|
||||||
|
@ -42,10 +42,16 @@ func TestWaitForBackoffFallback(t *testing.T) {
|
||||||
mockFetcher := dynamicMockFetcher{
|
mockFetcher := dynamicMockFetcher{
|
||||||
percentage: 0,
|
percentage: 0,
|
||||||
}
|
}
|
||||||
protocolSelector, err := connection.NewProtocolSelector(connection.HTTP2.String(), namedTunnel, mockFetcher.fetch(), resolveTTL, logger)
|
protocolSelector, err := connection.NewProtocolSelector(
|
||||||
|
connection.HTTP2.String(),
|
||||||
|
namedTunnel,
|
||||||
|
mockFetcher.fetch(),
|
||||||
|
resolveTTL,
|
||||||
|
&log,
|
||||||
|
)
|
||||||
assert.NoError(t, err)
|
assert.NoError(t, err)
|
||||||
config := &TunnelConfig{
|
config := &TunnelConfig{
|
||||||
Logger: logger,
|
Log: &log,
|
||||||
ProtocolSelector: protocolSelector,
|
ProtocolSelector: protocolSelector,
|
||||||
Observer: connection.NewObserver(nil, nil, false),
|
Observer: connection.NewObserver(nil, nil, false),
|
||||||
}
|
}
|
||||||
|
|
|
@ -1,37 +0,0 @@
|
||||||
package sshlog
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
|
|
||||||
"github.com/cloudflare/cloudflared/logger"
|
|
||||||
)
|
|
||||||
|
|
||||||
//empty manager implements the Manager but does nothing (for testing and to disable logging unless the logs are set)
|
|
||||||
type emptyManager struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
type emptyWriteCloser struct {
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewEmptyManager creates a new instance of a log empty log manager that does nothing
|
|
||||||
func NewEmptyManager() Manager {
|
|
||||||
return &emptyManager{}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *emptyManager) NewLogger(name string, logger logger.Service) (io.WriteCloser, error) {
|
|
||||||
return &emptyWriteCloser{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *emptyManager) NewSessionLogger(name string, logger logger.Service) (io.WriteCloser, error) {
|
|
||||||
return &emptyWriteCloser{}, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// emptyWriteCloser
|
|
||||||
|
|
||||||
func (w *emptyWriteCloser) Write(p []byte) (n int, err error) {
|
|
||||||
return len(p), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (w *emptyWriteCloser) Close() error {
|
|
||||||
return nil
|
|
||||||
}
|
|
|
@ -1,15 +0,0 @@
|
||||||
# Generate go.capnp.out with:
|
|
||||||
# capnp compile -o- go.capnp > go.capnp.out
|
|
||||||
# Must run inside this directory to preserve paths.
|
|
||||||
|
|
||||||
@0xd12a1c51fedd6c88;
|
|
||||||
|
|
||||||
annotation package(file) :Text;
|
|
||||||
annotation import(file) :Text;
|
|
||||||
annotation doc(struct, field, enum) :Text;
|
|
||||||
annotation tag(enumerant) :Text;
|
|
||||||
annotation notag(enumerant) :Void;
|
|
||||||
annotation customtype(field) :Text;
|
|
||||||
annotation name(struct, field, union, enum, enumerant, interface, method, param, annotation, const, group) :Text;
|
|
||||||
|
|
||||||
$package("capnp");
|
|
167 sshlog/logger.go
@ -1,167 +0,0 @@
|
||||||
package sshlog
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bufio"
|
|
||||||
"errors"
|
|
||||||
"fmt"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"sync"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/cloudflare/cloudflared/logger"
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
logTimeFormat = "2006-01-02T15-04-05.000"
|
|
||||||
megabyte = 1024 * 1024
|
|
||||||
defaultFileSizeLimit = 100 * megabyte
|
|
||||||
)
|
|
||||||
|
|
||||||
// Logger will buffer and write events to disk
|
|
||||||
type Logger struct {
|
|
||||||
sync.Mutex
|
|
||||||
filename string
|
|
||||||
file *os.File
|
|
||||||
writeBuffer *bufio.Writer
|
|
||||||
logger logger.Service
|
|
||||||
flushInterval time.Duration
|
|
||||||
maxFileSize int64
|
|
||||||
done chan struct{}
|
|
||||||
once sync.Once
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewLogger creates a Logger instance. A buffer is created that needs to be
|
|
||||||
// drained and closed when the caller is finished, so instances should call
|
|
||||||
// Close when finished with this Logger instance. Writes will be flushed to disk
|
|
||||||
// every second (fsync). filename is the name of the logfile to be created. The
|
|
||||||
// logger variable is a logger service that will log all i/o, filesystem error etc, that
|
|
||||||
// that shouldn't end execution of the logger, but are useful to report to the
|
|
||||||
// caller.
|
|
||||||
func NewLogger(filename string, logger logger.Service, flushInterval time.Duration, maxFileSize int64) (*Logger, error) {
|
|
||||||
if logger == nil {
|
|
||||||
return nil, errors.New("logger can't be nil")
|
|
||||||
}
|
|
||||||
f, err := os.OpenFile(filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600))
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
l := &Logger{filename: filename,
|
|
||||||
file: f,
|
|
||||||
writeBuffer: bufio.NewWriter(f),
|
|
||||||
logger: logger,
|
|
||||||
flushInterval: flushInterval,
|
|
||||||
maxFileSize: maxFileSize,
|
|
||||||
done: make(chan struct{}),
|
|
||||||
}
|
|
||||||
|
|
||||||
go l.writer()
|
|
||||||
return l, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Writes to a log buffer. Implements the io.Writer interface.
|
|
||||||
func (l *Logger) Write(p []byte) (n int, err error) {
|
|
||||||
l.Lock()
|
|
||||||
defer l.Unlock()
|
|
||||||
return l.writeBuffer.Write(p)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close drains anything left in the buffer and cleans up any resources still
|
|
||||||
// in use.
|
|
||||||
func (l *Logger) Close() error {
|
|
||||||
l.once.Do(func() {
|
|
||||||
close(l.done)
|
|
||||||
})
|
|
||||||
if err := l.write(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
return l.file.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
// writer is the run loop that handles draining the write buffer and syncing
|
|
||||||
// data to disk.
|
|
||||||
func (l *Logger) writer() {
|
|
||||||
ticker := time.NewTicker(l.flushInterval)
|
|
||||||
defer ticker.Stop()
|
|
||||||
for {
|
|
||||||
select {
|
|
||||||
case <-ticker.C:
|
|
||||||
if err := l.write(); err != nil {
|
|
||||||
l.logger.Errorf("%s", err)
|
|
||||||
}
|
|
||||||
case <-l.done:
|
|
||||||
return
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// write does the actual system write calls to disk and does a rotation if the
|
|
||||||
// file size limit has been reached. Since the rotation happens at the end,
|
|
||||||
// the rotation is a soft limit (aka the file can be bigger than the max limit
|
|
||||||
// because of the final buffer flush)
|
|
||||||
func (l *Logger) write() error {
|
|
||||||
l.Lock()
|
|
||||||
defer l.Unlock()
|
|
||||||
|
|
||||||
if l.writeBuffer.Buffered() <= 0 {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := l.writeBuffer.Flush(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if err := l.file.Sync(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
if l.shouldRotate() {
|
|
||||||
return l.rotate()
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// shouldRotate checks to see if the current file should be rotated to a new
|
|
||||||
// logfile.
|
|
||||||
func (l *Logger) shouldRotate() bool {
|
|
||||||
info, err := l.file.Stat()
|
|
||||||
if err != nil {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
return info.Size() >= l.maxFileSize
|
|
||||||
}
|
|
||||||
|
|
||||||
// rotate creates a new logfile with the existing filename and renames the
|
|
||||||
// existing file with a current timestamp.
|
|
||||||
func (l *Logger) rotate() error {
|
|
||||||
if err := l.file.Close(); err != nil {
|
|
||||||
return err
|
|
||||||
}
|
|
||||||
|
|
||||||
// move the existing file
|
|
||||||
newname := rotationName(l.filename)
|
|
||||||
if err := os.Rename(l.filename, newname); err != nil {
|
|
||||||
return fmt.Errorf("can't rename log file: %s", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
f, err := os.OpenFile(l.filename, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.FileMode(0600))
|
|
||||||
if err != nil {
|
|
||||||
return fmt.Errorf("failed to open new logfile %s", err)
|
|
||||||
}
|
|
||||||
l.file = f
|
|
||||||
l.writeBuffer = bufio.NewWriter(f)
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// rotationName creates a new filename from the given name, inserting a timestamp
|
|
||||||
// between the filename and the extension.
|
|
||||||
func rotationName(name string) string {
|
|
||||||
dir := filepath.Dir(name)
|
|
||||||
filename := filepath.Base(name)
|
|
||||||
ext := filepath.Ext(filename)
|
|
||||||
prefix := filename[:len(filename)-len(ext)]
|
|
||||||
t := time.Now()
|
|
||||||
timestamp := t.Format(logTimeFormat)
|
|
||||||
return filepath.Join(dir, fmt.Sprintf("%s-%s%s", prefix, timestamp, ext))
|
|
||||||
}
|
|
|
@ -1,90 +0,0 @@
|
||||||
package sshlog
|
|
||||||
|
|
||||||
import (
|
|
||||||
"log"
|
|
||||||
"os"
|
|
||||||
"path/filepath"
|
|
||||||
"strings"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/cloudflare/cloudflared/logger"
|
|
||||||
)
|
|
||||||
|
|
||||||
const logFileName = "test-logger.log"
|
|
||||||
|
|
||||||
func createLogger(t *testing.T) *Logger {
|
|
||||||
os.Remove(logFileName)
|
|
||||||
l := logger.NewOutputWriter(logger.NewMockWriteManager())
|
|
||||||
logger, err := NewLogger(logFileName, l, time.Millisecond, 1024)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("couldn't create the logger!", err)
|
|
||||||
}
|
|
||||||
return logger
|
|
||||||
}
|
|
||||||
|
|
||||||
// AUTH-2115 TODO: fix this test
|
|
||||||
//func TestWrite(t *testing.T) {
|
|
||||||
// testStr := "hi"
|
|
||||||
// logger := createLogger(t)
|
|
||||||
// defer func() {
|
|
||||||
// logger.Close()
|
|
||||||
// os.Remove(logFileName)
|
|
||||||
// }()
|
|
||||||
//
|
|
||||||
// logger.Write([]byte(testStr))
|
|
||||||
// time.DelayBeforeReconnect(2 * time.Millisecond)
|
|
||||||
// data, err := ioutil.ReadFile(logFileName)
|
|
||||||
// if err != nil {
|
|
||||||
// t.Fatal("couldn't read the log file!", err)
|
|
||||||
// }
|
|
||||||
// checkStr := string(data)
|
|
||||||
// if checkStr != testStr {
|
|
||||||
// t.Fatal("file data doesn't match!")
|
|
||||||
// }
|
|
||||||
//}
|
|
||||||
|
|
||||||
func TestFilenameRotation(t *testing.T) {
|
|
||||||
newName := rotationName("dir/bob/acoolloggername.log")
|
|
||||||
|
|
||||||
dir := filepath.Dir(newName)
|
|
||||||
if dir != "dir/bob" {
|
|
||||||
t.Fatal("rotation name doesn't respect the directory filepath:", newName)
|
|
||||||
}
|
|
||||||
|
|
||||||
filename := filepath.Base(newName)
|
|
||||||
if !strings.HasPrefix(filename, "acoolloggername") {
|
|
||||||
t.Fatal("rotation filename is wrong:", filename)
|
|
||||||
}
|
|
||||||
|
|
||||||
ext := filepath.Ext(newName)
|
|
||||||
if ext != ".log" {
|
|
||||||
t.Fatal("rotation file extension is wrong:", ext)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestRotation(t *testing.T) {
|
|
||||||
logger := createLogger(t)
|
|
||||||
|
|
||||||
for i := 0; i < 2000; i++ {
|
|
||||||
logger.Write([]byte("a string for testing rotation\n"))
|
|
||||||
}
|
|
||||||
logger.Close()
|
|
||||||
|
|
||||||
count := 0
|
|
||||||
filepath.Walk(".", func(path string, info os.FileInfo, err error) error {
|
|
||||||
if err != nil || info.IsDir() {
|
|
||||||
return nil
|
|
||||||
}
|
|
||||||
if strings.HasPrefix(info.Name(), "test-logger") {
|
|
||||||
log.Println("deleting: ", path)
|
|
||||||
os.Remove(path)
|
|
||||||
count++
|
|
||||||
}
|
|
||||||
return nil
|
|
||||||
})
|
|
||||||
if count < 2 {
|
|
||||||
t.Fatal("rotation didn't roll files:", count)
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
|
@ -1,34 +0,0 @@
|
||||||
package sshlog
|
|
||||||
|
|
||||||
import (
|
|
||||||
"io"
|
|
||||||
"path/filepath"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/cloudflare/cloudflared/logger"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Manager be managing logs bruh
|
|
||||||
type Manager interface {
|
|
||||||
NewLogger(string, logger.Service) (io.WriteCloser, error)
|
|
||||||
NewSessionLogger(string, logger.Service) (io.WriteCloser, error)
|
|
||||||
}
|
|
||||||
|
|
||||||
type manager struct {
|
|
||||||
baseDirectory string
|
|
||||||
}
|
|
||||||
|
|
||||||
// New creates a new instance of a log manager
|
|
||||||
func New(baseDirectory string) Manager {
|
|
||||||
return &manager{
|
|
||||||
baseDirectory: baseDirectory,
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *manager) NewLogger(name string, logger logger.Service) (io.WriteCloser, error) {
|
|
||||||
return NewLogger(filepath.Join(m.baseDirectory, name), logger, time.Second, defaultFileSizeLimit)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (m *manager) NewSessionLogger(name string, logger logger.Service) (io.WriteCloser, error) {
|
|
||||||
return NewSessionLogger(filepath.Join(m.baseDirectory, name), logger, time.Second, defaultFileSizeLimit)
|
|
||||||
}
|
|
|
@ -1,9 +0,0 @@
|
||||||
using Go = import "go.capnp";
|
|
||||||
@0x8f43375162194466;
|
|
||||||
$Go.package("sshlog");
|
|
||||||
$Go.import("github.com/cloudflare/cloudflared/sshlog");
|
|
||||||
|
|
||||||
struct SessionLog {
|
|
||||||
timestamp @0 :Text;
|
|
||||||
content @1 :Data;
|
|
||||||
}
|
|
|
@ -1,110 +0,0 @@
|
||||||
// Code generated by capnpc-go. DO NOT EDIT.
|
|
||||||
|
|
||||||
package sshlog
|
|
||||||
|
|
||||||
import (
|
|
||||||
capnp "zombiezen.com/go/capnproto2"
|
|
||||||
text "zombiezen.com/go/capnproto2/encoding/text"
|
|
||||||
schemas "zombiezen.com/go/capnproto2/schemas"
|
|
||||||
)
|
|
||||||
|
|
||||||
type SessionLog struct{ capnp.Struct }
|
|
||||||
|
|
||||||
// SessionLog_TypeID is the unique identifier for the type SessionLog.
|
|
||||||
const SessionLog_TypeID = 0xa13a07c504a5ab64
|
|
||||||
|
|
||||||
func NewSessionLog(s *capnp.Segment) (SessionLog, error) {
|
|
||||||
st, err := capnp.NewStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2})
|
|
||||||
return SessionLog{st}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func NewRootSessionLog(s *capnp.Segment) (SessionLog, error) {
|
|
||||||
st, err := capnp.NewRootStruct(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2})
|
|
||||||
return SessionLog{st}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func ReadRootSessionLog(msg *capnp.Message) (SessionLog, error) {
|
|
||||||
root, err := msg.RootPtr()
|
|
||||||
return SessionLog{root.Struct()}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s SessionLog) String() string {
|
|
||||||
str, _ := text.Marshal(0xa13a07c504a5ab64, s.Struct)
|
|
||||||
return str
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s SessionLog) Timestamp() (string, error) {
|
|
||||||
p, err := s.Struct.Ptr(0)
|
|
||||||
return p.Text(), err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s SessionLog) HasTimestamp() bool {
|
|
||||||
p, err := s.Struct.Ptr(0)
|
|
||||||
return p.IsValid() || err != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s SessionLog) TimestampBytes() ([]byte, error) {
|
|
||||||
p, err := s.Struct.Ptr(0)
|
|
||||||
return p.TextBytes(), err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s SessionLog) SetTimestamp(v string) error {
|
|
||||||
return s.Struct.SetText(0, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s SessionLog) Content() ([]byte, error) {
|
|
||||||
p, err := s.Struct.Ptr(1)
|
|
||||||
return []byte(p.Data()), err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s SessionLog) HasContent() bool {
|
|
||||||
p, err := s.Struct.Ptr(1)
|
|
||||||
return p.IsValid() || err != nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s SessionLog) SetContent(v []byte) error {
|
|
||||||
return s.Struct.SetData(1, v)
|
|
||||||
}
|
|
||||||
|
|
||||||
// SessionLog_List is a list of SessionLog.
|
|
||||||
type SessionLog_List struct{ capnp.List }
|
|
||||||
|
|
||||||
// NewSessionLog creates a new list of SessionLog.
|
|
||||||
func NewSessionLog_List(s *capnp.Segment, sz int32) (SessionLog_List, error) {
|
|
||||||
l, err := capnp.NewCompositeList(s, capnp.ObjectSize{DataSize: 0, PointerCount: 2}, sz)
|
|
||||||
return SessionLog_List{l}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
func (s SessionLog_List) At(i int) SessionLog { return SessionLog{s.List.Struct(i)} }
|
|
||||||
|
|
||||||
func (s SessionLog_List) Set(i int, v SessionLog) error { return s.List.SetStruct(i, v.Struct) }
|
|
||||||
|
|
||||||
func (s SessionLog_List) String() string {
|
|
||||||
str, _ := text.MarshalList(0xa13a07c504a5ab64, s.List)
|
|
||||||
return str
|
|
||||||
}
|
|
||||||
|
|
||||||
// SessionLog_Promise is a wrapper for a SessionLog promised by a client call.
|
|
||||||
type SessionLog_Promise struct{ *capnp.Pipeline }
|
|
||||||
|
|
||||||
func (p SessionLog_Promise) Struct() (SessionLog, error) {
|
|
||||||
s, err := p.Pipeline.Struct()
|
|
||||||
return SessionLog{s}, err
|
|
||||||
}
|
|
||||||
|
|
||||||
const schema_8f43375162194466 = "x\xda\x120q`\x12d\x8dg`\x08dae\xfb" +
|
|
||||||
"\x9f\xb2z)\xcbQv\xab\x85\x0c\x82B\x8c\xff\xd3\\" +
|
|
||||||
"$\x93\x02\xcd\x9d\xfb\x19X\x99\xd8\x19\x18\x04E_\x09" +
|
|
||||||
"*\x82h\xd9r\x06\xc6\xff\xc5\xa9\xc5\xc5\x99\xf9y\xf1" +
|
|
||||||
"L9\xf9\xe9z\xc9\x89\x05y\x05V\xc1`!\xfe<" +
|
|
||||||
"\x9f\xfc\xf4\x00F\xc6@\x0ef\x16\x06\x06\x16F\x06\x06" +
|
|
||||||
"A\xcd \x06\x86@\x0df\xc6@\x13&FAFF" +
|
|
||||||
"\x11F\x90\xa0\xa1\x13\x03C\xa0\x0e3c\xa0\x05\x13\xe3" +
|
|
||||||
"\xff\x92\xcc\xdc\xd4\xe2\x92\xc4\\\x06\xc6\x02F\x1e\x06&" +
|
|
||||||
"F\x1e\x06\xc6\xfa\xe4\xfc\xbc\x92\xd4\xbc\x12F^\x06&" +
|
|
||||||
"F^\x06F@\x00\x00\x00\xff\xff\xdaK$\x1a"
|
|
||||||
|
|
||||||
func init() {
|
|
||||||
schemas.Register(schema_8f43375162194466,
|
|
||||||
0xa13a07c504a5ab64)
|
|
||||||
}
|
|
|
@ -1,71 +0,0 @@
|
||||||
package sshlog
|
|
||||||
|
|
||||||
import (
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/cloudflare/cloudflared/logger"
|
|
||||||
capnp "zombiezen.com/go/capnproto2"
|
|
||||||
"zombiezen.com/go/capnproto2/pogs"
|
|
||||||
)
|
|
||||||
|
|
||||||
// SessionLogger will buffer and write events to disk using capnp proto for session replay
|
|
||||||
type SessionLogger struct {
|
|
||||||
logger *Logger
|
|
||||||
encoder *capnp.Encoder
|
|
||||||
}
|
|
||||||
|
|
||||||
type sessionLogData struct {
|
|
||||||
Timestamp string // The UTC timestamp of when the log occurred
|
|
||||||
Content []byte // The shell output
|
|
||||||
}
|
|
||||||
|
|
||||||
// NewSessionLogger creates a new session logger by encapsulating a Logger object and writing capnp encoded messages to it
|
|
||||||
func NewSessionLogger(filename string, logger logger.Service, flushInterval time.Duration, maxFileSize int64) (*SessionLogger, error) {
|
|
||||||
l, err := NewLogger(filename, logger, flushInterval, maxFileSize)
|
|
||||||
if err != nil {
|
|
||||||
return nil, err
|
|
||||||
}
|
|
||||||
sessionLogger := &SessionLogger{
|
|
||||||
logger: l,
|
|
||||||
encoder: capnp.NewEncoder(l),
|
|
||||||
}
|
|
||||||
return sessionLogger, nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// Writes to a log buffer. Implements the io.Writer interface.
|
|
||||||
func (l *SessionLogger) Write(p []byte) (n int, err error) {
|
|
||||||
return l.writeSessionLog(&sessionLogData{
|
|
||||||
Timestamp: time.Now().UTC().Format(time.RFC3339),
|
|
||||||
Content: p,
|
|
||||||
})
|
|
||||||
}
|
|
||||||
|
|
||||||
// Close drains anything left in the buffer and cleans up any resources still
|
|
||||||
// in use.
|
|
||||||
func (l *SessionLogger) Close() error {
|
|
||||||
return l.logger.Close()
|
|
||||||
}
|
|
||||||
|
|
||||||
func (l *SessionLogger) writeSessionLog(p *sessionLogData) (int, error) {
|
|
||||||
msg, seg, err := capnp.NewMessage(capnp.SingleSegment(nil))
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
log, err := NewRootSessionLog(seg)
|
|
||||||
if err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
log.SetTimestamp(p.Timestamp)
|
|
||||||
log.SetContent(p.Content)
|
|
||||||
|
|
||||||
if err := l.encoder.Encode(msg); err != nil {
|
|
||||||
return 0, err
|
|
||||||
}
|
|
||||||
return len(p.Content), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
func unmarshalSessionLog(s SessionLog) (*sessionLogData, error) {
|
|
||||||
p := new(sessionLogData)
|
|
||||||
err := pogs.Extract(p, SessionLog_TypeID, s.Struct)
|
|
||||||
return p, err
|
|
||||||
}
|
|
|
@ -1,69 +0,0 @@
|
||||||
package sshlog
|
|
||||||
|
|
||||||
import (
|
|
||||||
"os"
|
|
||||||
"testing"
|
|
||||||
"time"
|
|
||||||
|
|
||||||
"github.com/cloudflare/cloudflared/logger"
|
|
||||||
capnp "zombiezen.com/go/capnproto2"
|
|
||||||
)
|
|
||||||
|
|
||||||
const sessionLogFileName = "test-session-logger.log"
|
|
||||||
|
|
||||||
func createSessionLogger(t *testing.T) *SessionLogger {
|
|
||||||
os.Remove(sessionLogFileName)
|
|
||||||
l := logger.NewOutputWriter(logger.NewMockWriteManager())
|
|
||||||
logger, err := NewSessionLogger(sessionLogFileName, l, time.Millisecond, 1024)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("couldn't create the logger!", err)
|
|
||||||
}
|
|
||||||
return logger
|
|
||||||
}
|
|
||||||
|
|
||||||
func TestSessionLogWrite(t *testing.T) {
|
|
||||||
testStr := "hi"
|
|
||||||
logger := createSessionLogger(t)
|
|
||||||
defer func() {
|
|
||||||
os.Remove(sessionLogFileName)
|
|
||||||
}()
|
|
||||||
|
|
||||||
logger.Write([]byte(testStr))
|
|
||||||
logger.Close()
|
|
||||||
|
|
||||||
f, err := os.Open(sessionLogFileName)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("couldn't read the log file!", err)
|
|
||||||
}
|
|
||||||
defer f.Close()
|
|
||||||
|
|
||||||
msg, err := capnp.NewDecoder(f).Decode()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("couldn't read the capnp msg file!", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
sessionLog, err := ReadRootSessionLog(msg)
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("couldn't read the session log from the msg!", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
timeStr, err := sessionLog.Timestamp()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("couldn't read the Timestamp field!", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
_, terr := time.Parse(time.RFC3339, timeStr)
|
|
||||||
if terr != nil {
|
|
||||||
t.Fatal("couldn't parse the Timestamp into the expected RFC3339 format", terr)
|
|
||||||
}
|
|
||||||
|
|
||||||
data, err := sessionLog.Content()
|
|
||||||
if err != nil {
|
|
||||||
t.Fatal("couldn't read the Content field!", err)
|
|
||||||
}
|
|
||||||
|
|
||||||
checkStr := string(data)
|
|
||||||
if checkStr != testStr {
|
|
||||||
t.Fatal("file data doesn't match!")
|
|
||||||
}
|
|
||||||
}
|
|
|
@ -1,114 +0,0 @@
//+build !windows

package sshserver

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"crypto/rsa"
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"os"
	"path/filepath"

	"github.com/gliderlabs/ssh"
	"github.com/pkg/errors"
)

const (
	rsaFilename = "ssh_host_rsa_key"
	ecdsaFilename = "ssh_host_ecdsa_key"
)

var defaultHostKeyDir = filepath.Join(".cloudflared", "host_keys")

func (s *SSHProxy) configureHostKeys(hostKeyDir string) error {
	if hostKeyDir == "" {
		homeDir, err := os.UserHomeDir()
		if err != nil {
			return err
		}
		hostKeyDir = filepath.Join(homeDir, defaultHostKeyDir)
	}

	if _, err := os.Stat(hostKeyDir); os.IsNotExist(err) {
		if err := os.MkdirAll(hostKeyDir, 0755); err != nil {
			return errors.Wrap(err, fmt.Sprintf("Error creating %s directory", hostKeyDir))
		}
	}

	if err := s.configureECDSAKey(hostKeyDir); err != nil {
		return err
	}

	if err := s.configureRSAKey(hostKeyDir); err != nil {
		return err
	}

	return nil
}

func (s *SSHProxy) configureRSAKey(basePath string) error {
	keyPath := filepath.Join(basePath, rsaFilename)
	if _, err := os.Stat(keyPath); os.IsNotExist(err) {
		key, err := rsa.GenerateKey(rand.Reader, 2048)
		if err != nil {
			return errors.Wrap(err, "Error generating RSA host key")
		}

		privateKey := &pem.Block{
			Type: "RSA PRIVATE KEY",
			Bytes: x509.MarshalPKCS1PrivateKey(key),
		}

		if err = writePrivateKey(keyPath, privateKey); err != nil {
			return err
		}

		s.logger.Debugf("Created new RSA SSH host key: %s", keyPath)
	}
	if err := s.SetOption(ssh.HostKeyFile(keyPath)); err != nil {
		return errors.Wrap(err, "Could not set SSH RSA host key")
	}
	return nil
}

func (s *SSHProxy) configureECDSAKey(basePath string) error {
	keyPath := filepath.Join(basePath, ecdsaFilename)
	if _, err := os.Stat(keyPath); os.IsNotExist(err) {
		key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
		if err != nil {
			return errors.Wrap(err, "Error generating ECDSA host key")
		}

		keyBytes, err := x509.MarshalECPrivateKey(key)
		if err != nil {
			return errors.Wrap(err, "Error marshalling ECDSA key")
		}

		privateKey := &pem.Block{
			Type: "EC PRIVATE KEY",
			Bytes: keyBytes,
		}

		if err = writePrivateKey(keyPath, privateKey); err != nil {
			return err
		}

		s.logger.Debugf("Created new ECDSA SSH host key: %s", keyPath)
	}
	if err := s.SetOption(ssh.HostKeyFile(keyPath)); err != nil {
		return errors.Wrap(err, "Could not set SSH ECDSA host key")
	}
	return nil
}

func writePrivateKey(keyPath string, privateKey *pem.Block) error {
	if err := ioutil.WriteFile(keyPath, pem.EncodeToMemory(privateKey), 0600); err != nil {
		return errors.Wrap(err, fmt.Sprintf("Error writing host key to %s", keyPath))
	}
	return nil
}
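The helpers above only generate and PEM-encode host keys; nothing in this file reads them back. A minimal standalone sketch of the inverse path is shown below as a sanity check of the on-disk format. It is not part of this commit: the hard-coded directory and the tool itself are assumptions for illustration, mirroring the constants above.

// hostkey_check.go (illustrative sketch, not part of this repository):
// parse the PEM blocks written by writePrivateKey to confirm they round-trip.
package main

import (
	"crypto/x509"
	"encoding/pem"
	"fmt"
	"io/ioutil"
	"path/filepath"
)

func main() {
	dir := filepath.Join(".cloudflared", "host_keys") // same default directory as above

	for _, name := range []string{"ssh_host_rsa_key", "ssh_host_ecdsa_key"} {
		raw, err := ioutil.ReadFile(filepath.Join(dir, name))
		if err != nil {
			fmt.Println(name, "read error:", err)
			continue
		}
		block, _ := pem.Decode(raw)
		if block == nil {
			fmt.Println(name, "is not valid PEM")
			continue
		}
		// writePrivateKey stores PKCS#1 for RSA and SEC 1 for ECDSA, so parse accordingly.
		switch block.Type {
		case "RSA PRIVATE KEY":
			_, err = x509.ParsePKCS1PrivateKey(block.Bytes)
		case "EC PRIVATE KEY":
			_, err = x509.ParseECPrivateKey(block.Bytes)
		default:
			fmt.Println(name, "has unexpected PEM type", block.Type)
			continue
		}
		if err != nil {
			fmt.Println(name, "parse error:", err)
			continue
		}
		fmt.Println(name, "parses cleanly as", block.Type)
	}
}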
@ -1,29 +0,0 @@
package sshserver

import (
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"testing"
)

func TestHasPort(t *testing.T) {
	type testCase struct {
		input string
		expectedOutput string
	}

	tests := []testCase{
		{"localhost", "localhost:22"},
		{"other.addr:22", "other.addr:22"},
		{"[2001:db8::1]:8080", "[2001:db8::1]:8080"},
		{"[::1]", "[::1]:22"},
		{"2001:0db8:3c4d:0015:0000:0000:1a2f:1234", "[2001:0db8:3c4d:0015:0000:0000:1a2f:1234]:22"},
		{"::1", "[::1]:22"},
	}

	for _, test := range tests {
		out, err := canonicalizeDest(test.input)
		require.Nil(t, err)
		assert.Equal(t, test.expectedOutput, out)
	}
}
@ -1,491 +0,0 @@
//+build !windows

package sshserver

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"encoding/binary"
	"encoding/json"
	"fmt"
	"io"
	"net"
	"runtime"
	"strings"
	"time"

	"github.com/cloudflare/cloudflared/logger"
	"github.com/cloudflare/cloudflared/sshgen"
	"github.com/cloudflare/cloudflared/sshlog"
	"github.com/gliderlabs/ssh"
	"github.com/google/uuid"
	"github.com/pkg/errors"
	gossh "golang.org/x/crypto/ssh"
)

const (
	auditEventStart = "session_start"
	auditEventStop = "session_stop"
	auditEventExec = "exec"
	auditEventScp = "scp"
	auditEventResize = "resize"
	auditEventShell = "shell"
	sshContextSessionID = "sessionID"
	sshContextEventLogger = "eventLogger"
	sshContextPreamble = "sshPreamble"
	sshContextSSHClient = "sshClient"
	SSHPreambleLength = 2
	defaultSSHPort = "22"
)

type auditEvent struct {
	Event string `json:"event,omitempty"`
	EventType string `json:"event_type,omitempty"`
	SessionID string `json:"session_id,omitempty"`
	User string `json:"user,omitempty"`
	Login string `json:"login,omitempty"`
	Datetime string `json:"datetime,omitempty"`
	Hostname string `json:"hostname,omitempty"`
	Destination string `json:"destination,omitempty"`
}

// sshConn wraps the incoming net.Conn and a cleanup function
// This is done to allow the outgoing SSH client to be retrieved and closed when the conn itself is closed.
type sshConn struct {
	net.Conn
	cleanupFunc func()
}

// close calls the cleanupFunc before closing the conn
func (c sshConn) Close() error {
	c.cleanupFunc()
	return c.Conn.Close()
}

type SSHProxy struct {
	ssh.Server
	hostname string
	logger logger.Service
	shutdownC chan struct{}
	caCert ssh.PublicKey
	logManager sshlog.Manager
}

type SSHPreamble struct {
	Destination string
	JWT string
}

// New creates a new SSHProxy and configures its host keys and authentication by the data provided
func New(logManager sshlog.Manager, logger logger.Service, version, localAddress, hostname, hostKeyDir string, shutdownC chan struct{}, idleTimeout, maxTimeout time.Duration) (*SSHProxy, error) {
	sshProxy := SSHProxy{
		hostname: hostname,
		logger: logger,
		shutdownC: shutdownC,
		logManager: logManager,
	}

	sshProxy.Server = ssh.Server{
		Addr: localAddress,
		MaxTimeout: maxTimeout,
		IdleTimeout: idleTimeout,
		Version: fmt.Sprintf("SSH-2.0-Cloudflare-Access_%s_%s", version, runtime.GOOS),
		PublicKeyHandler: sshProxy.proxyAuthCallback,
		ConnCallback: sshProxy.connCallback,
		ChannelHandlers: map[string]ssh.ChannelHandler{
			"default": sshProxy.channelHandler,
		},
	}

	if err := sshProxy.configureHostKeys(hostKeyDir); err != nil {
		return nil, err
	}

	return &sshProxy, nil
}
// Start the SSH proxy listener to start handling SSH connections from clients
func (s *SSHProxy) Start() error {
	s.logger.Infof("Starting SSH server at %s", s.Addr)

	go func() {
		<-s.shutdownC
		if err := s.Close(); err != nil {
			s.logger.Errorf("Cannot close SSH server: %s", err)
		}
	}()

	return s.ListenAndServe()
}

// proxyAuthCallback attempts to connect to the ultimate SSH destination. If successful, it allows the incoming connection
// to connect to the proxy and saves the outgoing SSH client to the context. Otherwise, no connection to
// the proxy is allowed.
func (s *SSHProxy) proxyAuthCallback(ctx ssh.Context, key ssh.PublicKey) bool {
	client, err := s.dialDestination(ctx)
	if err != nil {
		return false
	}
	ctx.SetValue(sshContextSSHClient, client)
	return true
}

// connCallback reads the preamble sent from the proxy server and saves an audit event logger to the context.
// If any errors occur, the connection is terminated by returning nil from the callback.
func (s *SSHProxy) connCallback(ctx ssh.Context, conn net.Conn) net.Conn {
	// AUTH-2050: This is a temporary workaround of a timing issue in the tunnel muxer to allow further testing.
	// TODO: Remove this
	time.Sleep(10 * time.Millisecond)

	preamble, err := s.readPreamble(conn)
	if err != nil {
		if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
			s.logger.Info("Could not establish session. Client likely does not have --destination set and is using old-style ssh config")
		} else if err != io.EOF {
			s.logger.Errorf("failed to read SSH preamble: %s", err)
		}
		return nil
	}
	ctx.SetValue(sshContextPreamble, preamble)

	logger, sessionID, err := s.auditLogger()
	if err != nil {
		s.logger.Errorf("failed to configure logger: %s", err)
		return nil
	}
	ctx.SetValue(sshContextEventLogger, logger)
	ctx.SetValue(sshContextSessionID, sessionID)

	// attempts to retrieve and close the outgoing ssh client when the incoming conn is closed.
	// If no client exists, the conn is being closed before the PublicKeyCallback was called (where the client is created).
	cleanupFunc := func() {
		client, ok := ctx.Value(sshContextSSHClient).(*gossh.Client)
		if ok && client != nil {
			client.Close()
		}
	}

	return sshConn{conn, cleanupFunc}
}
// channelHandler proxies incoming and outgoing SSH traffic back and forth over an SSH Channel
func (s *SSHProxy) channelHandler(srv *ssh.Server, conn *gossh.ServerConn, newChan gossh.NewChannel, ctx ssh.Context) {
	if newChan.ChannelType() != "session" && newChan.ChannelType() != "direct-tcpip" {
		msg := fmt.Sprintf("channel type %s is not supported", newChan.ChannelType())
		s.logger.Info(msg)
		if err := newChan.Reject(gossh.UnknownChannelType, msg); err != nil {
			s.logger.Errorf("Error rejecting SSH channel: %s", err)
		}
		return
	}

	localChan, localChanReqs, err := newChan.Accept()
	if err != nil {
		s.logger.Errorf("Failed to accept session channel: %s", err)
		return
	}
	defer localChan.Close()

	// client will be closed when the sshConn is closed
	client, ok := ctx.Value(sshContextSSHClient).(*gossh.Client)
	if !ok {
		s.logger.Error("Could not retrieve client from context")
		return
	}

	remoteChan, remoteChanReqs, err := client.OpenChannel(newChan.ChannelType(), newChan.ExtraData())
	if err != nil {
		s.logger.Errorf("Failed to open remote channel: %s", err)
		return
	}

	defer remoteChan.Close()

	// Proxy ssh traffic back and forth between client and destination
	s.proxyChannel(localChan, remoteChan, localChanReqs, remoteChanReqs, conn, ctx)
}

// proxyChannel couples two SSH channels and proxies SSH traffic and channel requests back and forth.
func (s *SSHProxy) proxyChannel(localChan, remoteChan gossh.Channel, localChanReqs, remoteChanReqs <-chan *gossh.Request, conn *gossh.ServerConn, ctx ssh.Context) {
	done := make(chan struct{}, 2)
	go func() {
		if _, err := io.Copy(localChan, remoteChan); err != nil {
			s.logger.Errorf("remote to local copy error: %s", err)
		}
		done <- struct{}{}
	}()
	go func() {
		if _, err := io.Copy(remoteChan, localChan); err != nil {
			s.logger.Errorf("local to remote copy error: %s", err)
		}
		done <- struct{}{}
	}()

	// stderr streams are used for non-pty sessions since they have distinct IO streams.
	remoteStderr := remoteChan.Stderr()
	localStderr := localChan.Stderr()
	go func() {
		if _, err := io.Copy(remoteStderr, localStderr); err != nil {
			s.logger.Errorf("stderr local to remote copy error: %s", err)
		}
	}()
	go func() {
		if _, err := io.Copy(localStderr, remoteStderr); err != nil {
			s.logger.Errorf("stderr remote to local copy error: %s", err)
		}
	}()

	s.logAuditEvent(conn, "", auditEventStart, ctx)
	defer s.logAuditEvent(conn, "", auditEventStop, ctx)

	// Proxy channel requests
	for {
		select {
		case req := <-localChanReqs:
			if req == nil {
				return
			}
			if err := s.forwardChannelRequest(remoteChan, req); err != nil {
				s.logger.Errorf("Failed to forward request: %s", err)
				return
			}

			s.logChannelRequest(req, conn, ctx)

		case req := <-remoteChanReqs:
			if req == nil {
				return
			}
			if err := s.forwardChannelRequest(localChan, req); err != nil {
				s.logger.Errorf("Failed to forward request: %s", err)
				return
			}
		case <-done:
			return
		}
	}
}
// readPreamble reads a preamble from the SSH connection before any SSH traffic is sent.
// This preamble is a JSON encoded struct containing the user's JWT and ultimate destination.
// The first 2 bytes contain the length of the preamble which follows immediately.
func (s *SSHProxy) readPreamble(conn net.Conn) (*SSHPreamble, error) {
	// Set conn read deadline while reading preamble to prevent hangs if preamble wasn't sent.
	if err := conn.SetReadDeadline(time.Now().Add(500 * time.Millisecond)); err != nil {
		return nil, errors.Wrap(err, "failed to set conn deadline")
	}
	defer func() {
		if err := conn.SetReadDeadline(time.Time{}); err != nil {
			s.logger.Errorf("Failed to unset conn read deadline: %s", err)
		}
	}()

	size := make([]byte, SSHPreambleLength)
	if _, err := io.ReadFull(conn, size); err != nil {
		return nil, err
	}
	payloadLength := binary.BigEndian.Uint16(size)
	payload := make([]byte, payloadLength)
	if _, err := io.ReadFull(conn, payload); err != nil {
		return nil, err
	}

	var preamble SSHPreamble
	err := json.Unmarshal(payload, &preamble)
	if err != nil {
		return nil, err
	}

	preamble.Destination, err = canonicalizeDest(preamble.Destination)
	if err != nil {
		return nil, err
	}
	return &preamble, nil
}
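readPreamble expects a two-byte big-endian length prefix followed by the JSON-encoded SSHPreamble. A minimal client-side sketch of producing that framing is below; it assumes the same package and imports as this file, and sendPreamble is a hypothetical helper for illustration, not a function from this repository.

// sendPreamble is an illustrative counterpart to readPreamble: it writes the
// uint16 big-endian length prefix followed by the JSON-encoded preamble body.
func sendPreamble(conn net.Conn, destination, jwt string) error {
	payload, err := json.Marshal(SSHPreamble{Destination: destination, JWT: jwt})
	if err != nil {
		return err
	}
	size := make([]byte, SSHPreambleLength)
	binary.BigEndian.PutUint16(size, uint16(len(payload)))
	if _, err := conn.Write(size); err != nil {
		return err
	}
	_, err = conn.Write(payload)
	return err
}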
// canonicalizeDest adds a default port if one doesn't exist
func canonicalizeDest(dest string) (string, error) {
	_, _, err := net.SplitHostPort(dest)
	// if host and port are split without error, a port exists.
	if err != nil {
		addrErr, ok := err.(*net.AddrError)
		if !ok {
			return "", err
		}
		// If the port is missing, append it.
		if addrErr.Err == "missing port in address" {
			return fmt.Sprintf("%s:%s", dest, defaultSSHPort), nil
		}

		// If there are too many colons and the address is IPv6, wrap it in brackets and append the port. Otherwise the address is invalid.
		ip := net.ParseIP(dest)
		if addrErr.Err == "too many colons in address" && ip != nil && ip.To4() == nil {
			return fmt.Sprintf("[%s]:%s", dest, defaultSSHPort), nil
		}
		return "", addrErr
	}

	return dest, nil
}
// dialDestination creates a new SSH client and dials the destination server
func (s *SSHProxy) dialDestination(ctx ssh.Context) (*gossh.Client, error) {
	preamble, ok := ctx.Value(sshContextPreamble).(*SSHPreamble)
	if !ok {
		msg := "failed to retrieve SSH preamble from context"
		s.logger.Error(msg)
		return nil, errors.New(msg)
	}

	signer, err := s.genSSHSigner(preamble.JWT)
	if err != nil {
		s.logger.Errorf("Failed to generate signed short lived cert: %s", err)
		return nil, err
	}
	s.logger.Debugf("Short lived certificate for %s connecting to %s:\n\n%s", ctx.User(), preamble.Destination, gossh.MarshalAuthorizedKey(signer.PublicKey()))

	clientConfig := &gossh.ClientConfig{
		User: ctx.User(),
		// AUTH-2103 TODO: proper host key check
		HostKeyCallback: gossh.InsecureIgnoreHostKey(),
		Auth: []gossh.AuthMethod{gossh.PublicKeys(signer)},
		ClientVersion: ctx.ServerVersion(),
	}

	client, err := gossh.Dial("tcp", preamble.Destination, clientConfig)
	if err != nil {
		s.logger.Errorf("Failed to connect to destination SSH server: %s", err)
		return nil, err
	}
	return client, nil
}

// genSSHSigner generates a key pair and sends the public key to get signed by the CA
func (s *SSHProxy) genSSHSigner(jwt string) (gossh.Signer, error) {
	key, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, errors.Wrap(err, "failed to generate ecdsa key pair")
	}

	pub, err := gossh.NewPublicKey(&key.PublicKey)
	if err != nil {
		return nil, errors.Wrap(err, "failed to convert ecdsa public key to SSH public key")
	}

	pubBytes := gossh.MarshalAuthorizedKey(pub)
	signedCertBytes, err := sshgen.SignCert(jwt, string(pubBytes))
	if err != nil {
		return nil, errors.Wrap(err, "failed to retrieve cert from SSHCAAPI")
	}

	signedPub, _, _, _, err := gossh.ParseAuthorizedKey([]byte(signedCertBytes))
	if err != nil {
		return nil, errors.Wrap(err, "failed to parse SSH public key")
	}

	cert, ok := signedPub.(*gossh.Certificate)
	if !ok {
		return nil, errors.New("failed to assert public key as certificate")
	}
	signer, err := gossh.NewSignerFromKey(key)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create signer")
	}

	certSigner, err := gossh.NewCertSigner(cert, signer)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create cert signer")
	}
	return certSigner, nil
}
// forwardChannelRequest sends request req to SSH channel sshChan, waits for reply, and sends the reply back.
func (s *SSHProxy) forwardChannelRequest(sshChan gossh.Channel, req *gossh.Request) error {
	reply, err := sshChan.SendRequest(req.Type, req.WantReply, req.Payload)
	if err != nil {
		return errors.Wrap(err, "Failed to send request")
	}
	if err := req.Reply(reply, nil); err != nil {
		return errors.Wrap(err, "Failed to reply to request")
	}
	return nil
}

// logChannelRequest creates an audit log for different types of channel requests
func (s *SSHProxy) logChannelRequest(req *gossh.Request, conn *gossh.ServerConn, ctx ssh.Context) {
	var eventType string
	var event string
	switch req.Type {
	case "exec":
		var payload struct{ Value string }
		if err := gossh.Unmarshal(req.Payload, &payload); err != nil {
			s.logger.Errorf("Failed to unmarshal channel request payload: %s:%s with error: %s", req.Type, req.Payload, err)
		}
		event = payload.Value

		eventType = auditEventExec
		if strings.HasPrefix(payload.Value, "scp") {
			eventType = auditEventScp
		}
	case "shell":
		eventType = auditEventShell
	case "window-change":
		eventType = auditEventResize
	default:
		return
	}
	s.logAuditEvent(conn, event, eventType, ctx)
}
func (s *SSHProxy) auditLogger() (io.WriteCloser, string, error) {
	sessionUUID, err := uuid.NewRandom()
	if err != nil {
		return nil, "", errors.Wrap(err, "failed to create sessionID")
	}
	sessionID := sessionUUID.String()

	writer, err := s.logManager.NewLogger(fmt.Sprintf("%s-event.log", sessionID), s.logger)
	if err != nil {
		return nil, "", errors.Wrap(err, "failed to create logger")
	}
	return writer, sessionID, nil
}

func (s *SSHProxy) logAuditEvent(conn *gossh.ServerConn, event, eventType string, ctx ssh.Context) {
	sessionID, sessionIDOk := ctx.Value(sshContextSessionID).(string)
	writer, writerOk := ctx.Value(sshContextEventLogger).(io.WriteCloser)
	if !writerOk || !sessionIDOk {
		s.logger.Error("Failed to retrieve audit logger from context")
		return
	}

	var destination string
	preamble, ok := ctx.Value(sshContextPreamble).(*SSHPreamble)
	if ok {
		destination = preamble.Destination
	} else {
		s.logger.Error("Failed to retrieve SSH preamble from context")
	}

	ae := auditEvent{
		Event: event,
		EventType: eventType,
		SessionID: sessionID,
		User: conn.User(),
		Login: conn.User(),
		Datetime: time.Now().UTC().Format(time.RFC3339),
		Hostname: s.hostname,
		Destination: destination,
	}
	data, err := json.Marshal(&ae)
	if err != nil {
		s.logger.Errorf("Failed to marshal audit event. malformed audit object: %s", err)
		return
	}
	line := string(data) + "\n"
	if _, err := writer.Write([]byte(line)); err != nil {
		s.logger.Errorf("Failed to write audit event: %s", err)
	}
}
@ -1,29 +0,0 @@
//+build windows

package sshserver

import (
	"errors"

	"time"

	"github.com/cloudflare/cloudflared/logger"
	"github.com/cloudflare/cloudflared/sshlog"
)

const SSHPreambleLength = 2

type SSHServer struct{}

type SSHPreamble struct {
	Destination string
	JWT string
}

func New(_ sshlog.Manager, _ logger.Service, _, _, _, _ string, _ chan struct{}, _, _ time.Duration) (*SSHServer, error) {
	return nil, errors.New("cloudflared ssh server is not supported on windows")
}

func (s *SSHServer) Start() error {
	return errors.New("cloudflared ssh server is not supported on windows")
}
@ -1,27 +0,0 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn
NhAAAAAwEAAQAAAQEA0c6EklYvC9B041qEGWDNuot6G4tTVm9LCQC0vA+v2n25ru9CINV6
8IljmXBORXBwfG6PdLhg0SEabZUbsNX5WrIVbGovcghKS6GRsqI5+Quhm+o8eG042JE/hB
oYdZ19TcMEyPOGzHsx0U/BSN9ZJWVCxqN51iI6qyhz9f6jlX2LQBFEvXlhxgF3owBEf8UC
Zt/UvbZdmeeyKNQElPmiVLIJEAPCueECp7a2mjCiP3zqjDvSeeGk4CelB/1qZZ4V2n7fvb
HZjAB5JJs4KXs5o8KgvQnqgQMxiLFZ4PATt4+mxEzh4JymppbqJOo2rYwOA3TAIEWWtYRV
/ZKJ0AyhhQAAA8gciO8XHIjvFwAAAAdzc2gtcnNhAAABAQDRzoSSVi8L0HTjWoQZYM26i3
obi1NWb0sJALS8D6/afbmu70Ig1XrwiWOZcE5FcHB8bo90uGDRIRptlRuw1flashVsai9y
CEpLoZGyojn5C6Gb6jx4bTjYkT+EGhh1nX1NwwTI84bMezHRT8FI31klZULGo3nWIjqrKH
P1/qOVfYtAEUS9eWHGAXejAER/xQJm39S9tl2Z57Io1ASU+aJUsgkQA8K54QKntraaMKI/
fOqMO9J54aTgJ6UH/WplnhXaft+9sdmMAHkkmzgpezmjwqC9CeqBAzGIsVng8BO3j6bETO
HgnKamluok6jatjA4DdMAgRZa1hFX9konQDKGFAAAAAwEAAQAAAQEApVzGdKhk8ETevAst
rurze6JPHcKUbr3NQE1EJi2fBvCtF0oQrtxTx54h2GAB8Q0MO6bQfsiL1ojm0ZQCfUBJBs
jxxb9zoccS98Vilo7ybm5SdBcMjkZX1am1jCMdQCZfCpk4/kGi7yvyOe1IhG01UBodpX5X
mwTjhN+fdjW7LSiW6cKPClN49CZKgmtvI27FCt+/TtMzdCXOiJxJ4yZCzCRhSgssV0gWI1
0VJr/MHirKUvv/qCLAuOBxIr9UgdduRZUpNX+KS2rfhFEbjnUqc/57aAakpQmuPB5I+s9G
DnrF0HSHpq7u1XC1SvYlnFBN/0A7Hw/MX2SaBFH7mc9AAQAAAIAFuTHr6O8tCvWEawfxC0
qiAPQ+Yy1vthq5uewmuQujMutUnc9JAUl32PdU1DbS7APC1Dg9XL7SyAB6A+ZpRJRAKgCY
SneAKE6hOytH+yM206aekrz6VuZiSpBqpfEqDibVAaZIO8sv/9dtZd6kWemxNErPQoKJey
Z7/cuWUWQovAAAAIEA6ugIlVj1irPmElyCCt5YfPv2x8Dl54ELoP/WsffsrPHNQog64hFd
ahD7Wq63TA566bN85fkx8OVU5TbbEQmkHgOEV6nDRY2YsBSqIOblA/KehtfdUIqZB0iNBh
Gn6TV/z6HwnSR3gKv4b66Gveek6LfRAG3mbsLCgyRAbYgn6YUAAACBAOSlf+n1eh6yjtvF
Zecq3Zslj7O8cUs17PQx4vQ7sXNCFrIZdevWPIn9sVrt7/hsTrXunDz6eXCeclB35KZe3H
WPVjRoD+xnr5+sXx2qXOnKCR0LdFybso6IR5bXAI6DNSNfP7D9LPEQ+R73Jk0jPuLYzocS
iM89KZiuGpzr01gBAAAAEW1pa2VAQzAyWTUwVEdKR0g4AQ==
-----END OPENSSH PRIVATE KEY-----
@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDRzoSSVi8L0HTjWoQZYM26i3obi1NWb0sJALS8D6/afbmu70Ig1XrwiWOZcE5FcHB8bo90uGDRIRptlRuw1flashVsai9yCEpLoZGyojn5C6Gb6jx4bTjYkT+EGhh1nX1NwwTI84bMezHRT8FI31klZULGo3nWIjqrKHP1/qOVfYtAEUS9eWHGAXejAER/xQJm39S9tl2Z57Io1ASU+aJUsgkQA8K54QKntraaMKI/fOqMO9J54aTgJ6UH/WplnhXaft+9sdmMAHkkmzgpezmjwqC9CeqBAzGIsVng8BO3j6bETOHgnKamluok6jatjA4DdMAgRZa1hFX9konQDKGF mike@C02Y50TGJGH8
@ -1,49 +0,0 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAACFwAAAAdzc2gtcn
NhAAAAAwEAAQAAAgEA60Kneo87qPsh+zErWFl7vx93c7fyTxbZ9lUNqafgXy/BLOCc/nQS
McosVSLsQrbHlhYzfmZEhTiubmuYUrHchmsn1ml1HIqP8T5aDgtNbLqYnS4H5oO4Sj1+XH
lQtU7n7zHXgca9SnMWt1Fhkx1mvkeiOKs0eq7hV2TuIZxfmbYfIVvJGwrL0uWzbSEE1gvx
gTXZHxEChIQyrNviljgi4u2MD/cIi6KMeYUnaTL1FxO9G4GIFiy7ueHRwOZPIFHgYm+Vrt
X7XafSF0///zCrC63zzWt/6A06hFepOz2VXvm7SdckaR7qMXAb7kipsc0+dKk9ggU7Fqpx
ZY5cVeZo9RlRVhRXGDy7mABA/FMwvv+qYCgJ3nlZbdKbaiPLQu8ScTlJ9sMI06/ZiEY04b
meZ0ASM52gaDGjrFbbnuHNf5XV/oreEUhtCrryFnoIxmKgHznGjZ55q77FtTHnrAKFmKFP
11s3MLIX9o4RgtriOtl4KenkIfUumgtrwY/UGjOaOQUOrVH1am54wkUiVEF0Qd3AD8KCl/
l/xT5+t6cOspZ9GIhwa2NBmRjN/wVGp+Yrb08Re3kxPCX9bs5iLe+kHN0vuFr7RDo+eUoi
SPhWl6FUqx2W9NZqekmEgKn3oKrfbGaMH1VLkaKWlzQ4xJzP0iadQbIXGryLEYASydemZt
sAAAdQ/ovjxf6L48UAAAAHc3NoLXJzYQAAAgEA60Kneo87qPsh+zErWFl7vx93c7fyTxbZ
9lUNqafgXy/BLOCc/nQSMcosVSLsQrbHlhYzfmZEhTiubmuYUrHchmsn1ml1HIqP8T5aDg
tNbLqYnS4H5oO4Sj1+XHlQtU7n7zHXgca9SnMWt1Fhkx1mvkeiOKs0eq7hV2TuIZxfmbYf
IVvJGwrL0uWzbSEE1gvxgTXZHxEChIQyrNviljgi4u2MD/cIi6KMeYUnaTL1FxO9G4GIFi
y7ueHRwOZPIFHgYm+VrtX7XafSF0///zCrC63zzWt/6A06hFepOz2VXvm7SdckaR7qMXAb
7kipsc0+dKk9ggU7FqpxZY5cVeZo9RlRVhRXGDy7mABA/FMwvv+qYCgJ3nlZbdKbaiPLQu
8ScTlJ9sMI06/ZiEY04bmeZ0ASM52gaDGjrFbbnuHNf5XV/oreEUhtCrryFnoIxmKgHznG
jZ55q77FtTHnrAKFmKFP11s3MLIX9o4RgtriOtl4KenkIfUumgtrwY/UGjOaOQUOrVH1am
54wkUiVEF0Qd3AD8KCl/l/xT5+t6cOspZ9GIhwa2NBmRjN/wVGp+Yrb08Re3kxPCX9bs5i
Le+kHN0vuFr7RDo+eUoiSPhWl6FUqx2W9NZqekmEgKn3oKrfbGaMH1VLkaKWlzQ4xJzP0i
adQbIXGryLEYASydemZtsAAAADAQABAAACABUYzBYEhDAaHSj+dsmcdKll8/tPko4fGXqq
k+gT4t4GVUdl+Q4kcIFAhQs5b4BoDava39FE8H4V4CaMxYMc6g6vy0nB+TuO/Wt/0OmTf+
TxMsBdoV29kCgwLYWzZ1Zq9geQK6g6nzzu5ymXRa3ApDcKC3UTfUhHKHQC3AvtjvEk0NPX
/EfNhwuph5aQsHNVbNnOb2MGznf9tuGjckVQUWiSLs47s+t5rykylJ8tb6cbIQk3a3G5nz
gDFSE8Rfo6/Wk2YnDkRX9XjlKC3Q0QWzZX6hYQvs6baRT3G3jxg9SZhn8PqPc4S34VdJvA
rl8AbcpeZuKi/3J/5F1cD9GwMNcl4gM87piF20/r9mMvC4zBAEgyF8WBi4OjSu0+ccsEsb
GSpxKK04OPTB7p8mLJ8hQUiREg5OuPEEcAoDSuHgdliE7nDHzuImbpTcAZcWhkJaUdBWI6
qcnGPARzxAOmuzkY8Gq0MtcWge5QxnLWJyrfy43M984Cvxql/maLUij4eTbMDDwV7Qx30V
P2tJp5+hOnitRwB6cQIg5N7/cTQdJ6eiFYuw0v3IfHjYmaolY8F3u38Zv2PPk50CorPRDG
esx0a9Elm2UKPb145MtHGZtLH2mayRnDjnxr25iLwgokI06tCLCNvbkYLA7wVpJn81eKmZ
tQBtbfqBSiDiLjCrehAAABAQDh8vmgPR95Kx1DeBxzMSja7TStP5V58JcUvLP4dAN6zqtt
rMuKDfJRSIVYGkU8vXlME5E6phWP7R5lYY7+kLDbeZrYQg1AxkK8y4fUYkCLBuEcCjzWDK
oqZQNskk4urbCdBIP6AR7d/LMCHBb9rk2iOuUeos6JHRKbPGP1lvH3hLkbH9CA0F41sz86
JFg6u/XaRQ2CyhS7y7SQ8dmaANGz9LGdIRqIoZ8Hfht8t1VRbM9fzSb3xoxUItbHpk9R9g
GZsHSryi7AtRmHt0uBrWIv6RbIY0epCbjdCLvHflbkPgwBM7UndgkOSIwQ4SQF8Fs+e9/r
hV05h0Y81vd1RZvOAAABAQD5EgW3SpmYzeMmiP7MKkfIlbZtwVmRu4anTzWxlk5pJ9GXnC
QoInULCipWAOeJbuLIgRWLU4VzhOUbYLNKQPXECARfgoto2VXoXZZ2q2O4aXaCpeyU6nE8
VKbp4nU1jEg5hWB3PRwZ8Pzs4A93/9mrpVzLmCT+LW9Rlnp6tTpqcUKGugg8vr64SSgqnV
ZFyQgHgw+ZGOG9w714urS3U97WNTeHXAs0p2YBOu5XW3JQ3jkRo7YyZF3+TtBxbgfHRZfH
O2mFcMBD3Sn4t+LAbgnLye3S2/WZf/gQwdVB7BgrVqguzQ2hGoOxNiwadkIDsxb6r/u3n6
2lScpHFDS0WnpRAAABAQDxzkV52VX6wAWkQe/2KFH9wTG0XFANmZUnnTPR8wd+b9E7HIr0
Mdd8iAHOhLRvTy8mih53GGBptXK7GdABMZtkqDErbXhuC8xbi9uRLEHiRe/oBfWr8vYIZY
awiw3/EqxaTv0HBMicdr2S31Bs2/mjrVuJH0wAaI9ueQnZizzjgWuzeNZMWq1qk0akUUdm
PDVd58yBkt8lKlkOG0LJAn6JEG9oH9XiTFShHzu1dQmoC2bKVHdxL8WCcYFVtmyoMRcLZq
u6d4nyKha02cYZB5hM3VcizJI5HY/A+H3fBkRR0hXgkU5R89w+8x9VSJkNVx+JGC7ziK4a
kUjfOmR5WBdrAAAAE3Rlc3RAY2xvdWRmbGFyZS5jb20BAgMEBQYH
-----END OPENSSH PRIVATE KEY-----
@ -1 +0,0 @@
ssh-rsa-cert-v01@openssh.com AAAAHHNzaC1yc2EtY2VydC12MDFAb3BlbnNzaC5jb20AAAAgOsuFqKdzp/nC3wQfKVJBdHa8axtGryKplPkDjdSXT4kAAAADAQABAAACAQDrQqd6jzuo+yH7MStYWXu/H3dzt/JPFtn2VQ2pp+BfL8Es4Jz+dBIxyixVIuxCtseWFjN+ZkSFOK5ua5hSsdyGayfWaXUcio/xPloOC01supidLgfmg7hKPX5ceVC1TufvMdeBxr1Kcxa3UWGTHWa+R6I4qzR6ruFXZO4hnF+Zth8hW8kbCsvS5bNtIQTWC/GBNdkfEQKEhDKs2+KWOCLi7YwP9wiLoox5hSdpMvUXE70bgYgWLLu54dHA5k8gUeBib5Wu1ftdp9IXT///MKsLrfPNa3/oDTqEV6k7PZVe+btJ1yRpHuoxcBvuSKmxzT50qT2CBTsWqnFljlxV5mj1GVFWFFcYPLuYAED8UzC+/6pgKAneeVlt0ptqI8tC7xJxOUn2wwjTr9mIRjThuZ5nQBIznaBoMaOsVtue4c1/ldX+it4RSG0KuvIWegjGYqAfOcaNnnmrvsW1MeesAoWYoU/XWzcwshf2jhGC2uI62Xgp6eQh9S6aC2vBj9QaM5o5BQ6tUfVqbnjCRSJUQXRB3cAPwoKX+X/FPn63pw6yln0YiHBrY0GZGM3/BUan5itvTxF7eTE8Jf1uzmIt76Qc3S+4WvtEOj55SiJI+FaXoVSrHZb01mp6SYSAqfegqt9sZowfVUuRopaXNDjEnM/SJp1BshcavIsRgBLJ16Zm2wAAAAAAAAAAAAAAAQAAAA10ZXN0VXNlckB0ZXN0AAAADAAAAAh0ZXN0VXNlcgAAAAAAAAAA//////////8AAAAAAAAAggAAABVwZXJtaXQtWDExLWZvcndhcmRpbmcAAAAAAAAAF3Blcm1pdC1hZ2VudC1mb3J3YXJkaW5nAAAAAAAAABZwZXJtaXQtcG9ydC1mb3J3YXJkaW5nAAAAAAAAAApwZXJtaXQtcHR5AAAAAAAAAA5wZXJtaXQtdXNlci1yYwAAAAAAAAAAAAABFwAAAAdzc2gtcnNhAAAAAwEAAQAAAQEA0c6EklYvC9B041qEGWDNuot6G4tTVm9LCQC0vA+v2n25ru9CINV68IljmXBORXBwfG6PdLhg0SEabZUbsNX5WrIVbGovcghKS6GRsqI5+Quhm+o8eG042JE/hBoYdZ19TcMEyPOGzHsx0U/BSN9ZJWVCxqN51iI6qyhz9f6jlX2LQBFEvXlhxgF3owBEf8UCZt/UvbZdmeeyKNQElPmiVLIJEAPCueECp7a2mjCiP3zqjDvSeeGk4CelB/1qZZ4V2n7fvbHZjAB5JJs4KXs5o8KgvQnqgQMxiLFZ4PATt4+mxEzh4JymppbqJOo2rYwOA3TAIEWWtYRV/ZKJ0AyhhQAAAQ8AAAAHc3NoLXJzYQAAAQC2lL+6JYTGOdz1zNnck6onrFcVpO2onCVAKP8HdLoCeH0/upIugaCocPKuzoURYEfiHQotviNeprE/2CyAroJ5VBdqWftEeHn3FFvBCQ1gwRQ7oci4C5n72t0vjWWE6WBylS0RqpJjr6EQ8a1vuwIqAQrEJPp2yNLjRH2WD7eicBh5f43VKOMr73DtyTh4xoF0C2sNBROudt58npTaYqRHQgoI25V/aCmuYBgM3wdAGcoEZGoSerMfhID7GcWkvemq2hF8mQsspG3zgnyQXk+ahagmefzxutDnr3KdrZ637La0/XwABvBZ9L4l5RiEilVI1Shl96F2qbBW2YZ64pUQ test@cloudflare.com
@ -1 +0,0 @@
ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDrQqd6jzuo+yH7MStYWXu/H3dzt/JPFtn2VQ2pp+BfL8Es4Jz+dBIxyixVIuxCtseWFjN+ZkSFOK5ua5hSsdyGayfWaXUcio/xPloOC01supidLgfmg7hKPX5ceVC1TufvMdeBxr1Kcxa3UWGTHWa+R6I4qzR6ruFXZO4hnF+Zth8hW8kbCsvS5bNtIQTWC/GBNdkfEQKEhDKs2+KWOCLi7YwP9wiLoox5hSdpMvUXE70bgYgWLLu54dHA5k8gUeBib5Wu1ftdp9IXT///MKsLrfPNa3/oDTqEV6k7PZVe+btJ1yRpHuoxcBvuSKmxzT50qT2CBTsWqnFljlxV5mj1GVFWFFcYPLuYAED8UzC+/6pgKAneeVlt0ptqI8tC7xJxOUn2wwjTr9mIRjThuZ5nQBIznaBoMaOsVtue4c1/ldX+it4RSG0KuvIWegjGYqAfOcaNnnmrvsW1MeesAoWYoU/XWzcwshf2jhGC2uI62Xgp6eQh9S6aC2vBj9QaM5o5BQ6tUfVqbnjCRSJUQXRB3cAPwoKX+X/FPn63pw6yln0YiHBrY0GZGM3/BUan5itvTxF7eTE8Jf1uzmIt76Qc3S+4WvtEOj55SiJI+FaXoVSrHZb01mp6SYSAqfegqt9sZowfVUuRopaXNDjEnM/SJp1BshcavIsRgBLJ16Zm2w== test@cloudflare.com
@ -1,27 +0,0 @@
-----BEGIN OPENSSH PRIVATE KEY-----
b3BlbnNzaC1rZXktdjEAAAAABG5vbmUAAAAEbm9uZQAAAAAAAAABAAABFwAAAAdzc2gtcn
NhAAAAAwEAAQAAAQEAzBO7TXxbpk7sGQm/Wa29N/NFe5uuoEQGC5hxfihmcvVgeKeNKiSS
snxzCE1Y6SmNMoE4aQs92wtcn48GmxRwZSXbCqLq2CJrHfe9B2k3aPkJZpQkFMshcJGo7p
G0Vlo7dWAbYf99/YKddf290uLK7vxw9ty0pM1hXSXHNShv1b+bTQm/COMZ5jNsncjc1yBH
KGkFVHee9Dh4Z0xLlHipIyyNXXzI0RFYuHSNJz9GD310XQLIIroptr7+/7g6+sPPGsNlI+
95OScba1/PQ2b/qy+KyIwNIMSd9ziJy5xnO7Vo3LrqQrza1Pkn2i29PljUcbc/F0hhXNIq
ITdNWwVqsQAAA8iKllTIipZUyAAAAAdzc2gtcnNhAAABAQDME7tNfFumTuwZCb9Zrb0380
V7m66gRAYLmHF+KGZy9WB4p40qJJKyfHMITVjpKY0ygThpCz3bC1yfjwabFHBlJdsKourY
Imsd970HaTdo+QlmlCQUyyFwkajukbRWWjt1YBth/339gp11/b3S4sru/HD23LSkzWFdJc
c1KG/Vv5tNCb8I4xnmM2ydyNzXIEcoaQVUd570OHhnTEuUeKkjLI1dfMjREVi4dI0nP0YP
fXRdAsgiuim2vv7/uDr6w88aw2Uj73k5JxtrX89DZv+rL4rIjA0gxJ33OInLnGc7tWjcuu
pCvNrU+SfaLb0+WNRxtz8XSGFc0iohN01bBWqxAAAAAwEAAQAAAQAKEtNFEOVpQS4QUlXa
tGPJtj1wy4+EI7d0rRK1GoNsG0amzgZ+1Q1UuCXpe//uinmIy64gKUjlXhs1WRcHYqvlok
e8r6wN/Szybr8q9Xuht+FJ6fgZ+qjs6JPBKvoO5SdYNOVFIhpzABaLs3nCRiWkRFvDI8Pa
+rRap7m8mwFiOJtmdiIZYFxzw6xXwTsGCrWPKgTv3FKGZzXnCB9i7jC2vwT1MDYbcnzEH4
Ba4dxI8bp6WWEX0biRIXj3jCtLb5gisNTSxdZs254Syh75HEXunSh2YO+yVSWQtZj19ewW
6Rb1Z3x5rVfXcgSkg7gZd9EpbckIIg6+MFSH3wdGW6atAAAAgQDFXiMuNd4ZYwdyhjlM5n
nFqQDXGgnwyNdiIqAapoqTdF5aZwNnbTU0fCFaDMLCQAHgntcgCEsW9A4HzDzYhOABKElv
j973vXWF165wFiZwuKSfroq/6JH6CiIcjiqpszbnqSOzy1hq913RWILS6e9yMjxRv8PUjm
E+IkcnfcFUwAAAAIEA+jwI3ICe8PGEIezV2tvQFeQy2Z2wGslu1yvqfTYEztSmtygns3wn
ZBb+cBXCnpqUCtznG7hZhq7I4m1I47BYznULwwFiBTVtBASG5wNP7zeVKTVZ4SKprze+Fe
I/nUZDJ5Q26um7eDbhvZ/n95GY+fucMVHoSBfX1wE16XBfp88AAACBANDHcgC4qP2oyOw/
+p9HineMQd/ppG3fePe07jyZXLHLf0rByFveFgRAQ1m77O7FtP3fFKy3Y9nNy18LGq35ZK
Blsz2B23bO8NuffgAhchDG7KzKFXCo+AraIj5znp/znK5zIkaiiSOQaYywJ36EooYVpRtj
ep5ap6bBFDZ2e+V/AAAAEW1pa2VAQzAyWTUwVEdKR0g4AQ==
-----END OPENSSH PRIVATE KEY-----