diff --git a/Gopkg.lock b/Gopkg.lock index 1a99b7a3..d86277ed 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -73,15 +73,9 @@ version = "v1.2.0" [[projects]] - digest = "1:6a503e232df389d94ebb97dfb22d4ae463b6e2f351660613e11d9e42f57ab6df" + digest = "1:6f70106e7bc1c803e8a0a4519e09c12d154771acfa2559206e97b033bbd1dd38" name = "github.com/coreos/go-oidc" - packages = [ - "http", - "jose", - "key", - "oauth2", - "oidc", - ] + packages = ["jose"] pruneopts = "UT" revision = "a93f71fdfe73d2c0f5413c0565eea0af6523a6df" @@ -93,18 +87,6 @@ revision = "95778dfbb74eb7e4dbaf43bf7d71809650ef8076" version = "v19" -[[projects]] - digest = "1:6fda0d7f5e52b081e075775b1ecebf1ea0c923e7be33604ed0225ae078e701b5" - name = "github.com/coreos/pkg" - packages = [ - "health", - "httputil", - "timeutil", - ] - pruneopts = "UT" - revision = "97fdf19511ea361ae1c100dd393cc47f8dcfa1e1" - version = "v4" - [[projects]] digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" name = "github.com/davecgh/go-spew" @@ -212,14 +194,6 @@ pruneopts = "UT" revision = "8e809c8a86450a29b90dcc9efbf062d0fe6d9746" -[[projects]] - digest = "1:75ab90ae3f5d876167e60f493beadfe66f0ed861a710f283fb06c86437a09538" - name = "github.com/jonboulle/clockwork" - packages = ["."] - pruneopts = "UT" - revision = "2eee05ed794112d45db504eb05aa693efd2b8b09" - version = "v0.1.0" - [[projects]] digest = "1:31e761d97c76151dde79e9d28964a812c46efc5baee4085b86f68f0c654450de" name = "github.com/konsorten/go-windows-terminal-sequences" @@ -291,6 +265,14 @@ revision = "af06845cf3004701891bf4fdb884bfe4920b3727" version = "v1.1.0" +[[projects]] + digest = "1:53bc4cd4914cd7cd52139990d5170d6dc99067ae31c56530621b18b35fc30318" + name = "github.com/mitchellh/mapstructure" + packages = ["."] + pruneopts = "UT" + revision = "3536a929edddb9a5b34bd6861dc4a9647cb459fe" + version = "v1.1.2" + [[projects]] digest = "1:11e62d6050198055e6cd87ed57e5d8c669e84f839c16e16f192374d913d1a70d" name = "github.com/opentracing/opentracing-go" @@ -577,7 +559,6 @@ "github.com/coredns/coredns/plugin/pkg/rcode", "github.com/coredns/coredns/request", "github.com/coreos/go-oidc/jose", - "github.com/coreos/go-oidc/oidc", "github.com/coreos/go-systemd/daemon", "github.com/elgs/gosqljson", "github.com/equinox-io/equinox", @@ -591,6 +572,7 @@ "github.com/mattn/go-colorable", "github.com/miekg/dns", "github.com/mitchellh/go-homedir", + "github.com/mitchellh/mapstructure", "github.com/pkg/errors", "github.com/prometheus/client_golang/prometheus", "github.com/prometheus/client_golang/prometheus/promhttp", diff --git a/Gopkg.toml b/Gopkg.toml index bc97202a..319d4f8b 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -85,3 +85,7 @@ [[constraint]] name = "github.com/google/uuid" version = "=1.1.1" + +[[constraint]] + name = "github.com/mitchellh/mapstructure" + version = "1.1.2" diff --git a/cmd/cloudflared/tunnel/cmd.go b/cmd/cloudflared/tunnel/cmd.go index 298487f1..f6f027c3 100644 --- a/cmd/cloudflared/tunnel/cmd.go +++ b/cmd/cloudflared/tunnel/cmd.go @@ -12,7 +12,6 @@ import ( "syscall" "time" - "github.com/cloudflare/cloudflared/h2mux" "github.com/cloudflare/cloudflared/tunnelrpc/pogs" "github.com/cloudflare/cloudflared/connection" @@ -386,6 +385,17 @@ func startDeclarativeTunnel(ctx context.Context, logger.WithError(err) return err } + reverseProxyConfig, err := pogs.NewReverseProxyConfig( + c.String("hostname"), + reverseProxyOrigin, + c.Uint64("retries"), + c.Duration("proxy-connection-timeout"), + c.Uint64("compression-quality"), + ) + if err != nil { + 
logger.WithError(err).Error("Cannot initialize default client config because reverse proxy config is invalid") + return err + } defaultClientConfig := &pogs.ClientConfig{ Version: pogs.InitVersion(), SupervisorConfig: &pogs.SupervisorConfig{ @@ -399,13 +409,8 @@ func startDeclarativeTunnel(ctx context.Context, Timeout: c.Duration("dial-edge-timeout"), MaxFailedHeartbeats: c.Uint64("heartbeat-count"), }, - DoHProxyConfigs: []*pogs.DoHProxyConfig{}, - ReverseProxyConfigs: []*pogs.ReverseProxyConfig{ - { - TunnelHostname: h2mux.TunnelHostname(c.String("hostname")), - Origin: reverseProxyOrigin, - }, - }, + DoHProxyConfigs: []*pogs.DoHProxyConfig{}, + ReverseProxyConfigs: []*pogs.ReverseProxyConfig{reverseProxyConfig}, } autoupdater := updater.NewAutoUpdater(defaultClientConfig.SupervisorConfig.AutoUpdateFrequency, listeners) diff --git a/streamhandler/stream_handler.go b/streamhandler/stream_handler.go index 83d65d90..9f4a79f2 100644 --- a/streamhandler/stream_handler.go +++ b/streamhandler/stream_handler.go @@ -86,7 +86,7 @@ func (s *StreamHandler) UpdateConfig(newConfig []*pogs.ReverseProxyConfig) (fail s.tunnelHostnameMapper.DeleteAll() for _, tunnelConfig := range newConfig { tunnelHostname := tunnelConfig.TunnelHostname - originSerice, err := tunnelConfig.Origin.Service() + originSerice, err := tunnelConfig.OriginConfigUnmarshaler.OriginConfig.Service() if err != nil { s.logger.WithField("tunnelHostname", tunnelHostname).WithError(err).Error("Invalid origin service config") failedConfigs = append(failedConfigs, &pogs.FailedConfig{ diff --git a/streamhandler/stream_handler_test.go b/streamhandler/stream_handler_test.go index c2d95b69..acdcfda5 100644 --- a/streamhandler/stream_handler_test.go +++ b/streamhandler/stream_handler_test.go @@ -49,8 +49,10 @@ func TestServeRequest(t *testing.T) { reverseProxyConfigs := []*pogs.ReverseProxyConfig{ { TunnelHostname: testTunnelHostname, - Origin: &pogs.HTTPOriginConfig{ - URLString: httpServer.URL, + OriginConfigUnmarshaler: &pogs.OriginConfigUnmarshaler{ + OriginConfig: &pogs.HTTPOriginConfig{ + URLString: httpServer.URL, + }, }, }, } @@ -97,8 +99,10 @@ func TestServeBadRequest(t *testing.T) { reverseProxyConfigs := []*pogs.ReverseProxyConfig{ { TunnelHostname: testTunnelHostname, - Origin: &pogs.HTTPOriginConfig{ - URLString: "", + OriginConfigUnmarshaler: &pogs.OriginConfigUnmarshaler{ + OriginConfig: &pogs.HTTPOriginConfig{ + URLString: "", + }, }, }, } diff --git a/tunnelrpc/pogs/config.go b/tunnelrpc/pogs/config.go index 67926a0c..644d40f2 100644 --- a/tunnelrpc/pogs/config.go +++ b/tunnelrpc/pogs/config.go @@ -14,6 +14,7 @@ import ( "github.com/cloudflare/cloudflared/originservice" "github.com/cloudflare/cloudflared/tlsconfig" "github.com/cloudflare/cloudflared/tunnelrpc" + "github.com/pkg/errors" capnp "zombiezen.com/go/capnproto2" "zombiezen.com/go/capnproto2/pogs" @@ -27,11 +28,11 @@ import ( // ClientConfig is a collection of FallibleConfig that determines how cloudflared should function type ClientConfig struct { - Version Version - SupervisorConfig *SupervisorConfig - EdgeConnectionConfig *EdgeConnectionConfig - DoHProxyConfigs []*DoHProxyConfig - ReverseProxyConfigs []*ReverseProxyConfig + Version Version `json:"version"` + SupervisorConfig *SupervisorConfig `json:"supervisor_config"` + EdgeConnectionConfig *EdgeConnectionConfig `json:"edge_connection_config"` + DoHProxyConfigs []*DoHProxyConfig `json:"doh_proxy_configs"` + ReverseProxyConfigs []*ReverseProxyConfig `json:"reverse_proxy_configs"` } // Version type models the version 
of a ClientConfig @@ -56,9 +57,9 @@ type FallibleConfig interface { // SupervisorConfig specifies config of components managed by Supervisor other than ConnectionManager type SupervisorConfig struct { - AutoUpdateFrequency time.Duration - MetricsUpdateFrequency time.Duration - GracePeriod time.Duration + AutoUpdateFrequency time.Duration `json:"auto_update_frequency"` + MetricsUpdateFrequency time.Duration `json:"metrics_update_frequency"` + GracePeriod time.Duration `json:"grace_period"` } // FailReason impelents FallibleConfig interface for SupervisorConfig @@ -68,11 +69,11 @@ func (sc *SupervisorConfig) FailReason(err error) string { // EdgeConnectionConfig specifies what parameters and how may connections should ConnectionManager establish with edge type EdgeConnectionConfig struct { - NumHAConnections uint8 - HeartbeatInterval time.Duration - Timeout time.Duration - MaxFailedHeartbeats uint64 - UserCredentialPath string + NumHAConnections uint8 `json:"num_ha_connections"` + HeartbeatInterval time.Duration `json:"heartbeat_interval"` + Timeout time.Duration `json:"timeout"` + MaxFailedHeartbeats uint64 `json:"max_failed_heartbeats"` + UserCredentialPath string `json:"user_credential_path"` } // FailReason impelents FallibleConfig interface for EdgeConnectionConfig @@ -82,9 +83,9 @@ func (cmc *EdgeConnectionConfig) FailReason(err error) string { // DoHProxyConfig is configuration for DNS over HTTPS service type DoHProxyConfig struct { - ListenHost string - ListenPort uint16 - Upstreams []string + ListenHost string `json:"listen_host"` + ListenPort uint16 `json:"listen_port"` + Upstreams []string `json:"upstreams"` } // FailReason impelents FallibleConfig interface for DoHProxyConfig @@ -94,11 +95,11 @@ func (dpc *DoHProxyConfig) FailReason(err error) string { // ReverseProxyConfig how and for what hostnames can this cloudflared proxy type ReverseProxyConfig struct { - TunnelHostname h2mux.TunnelHostname - Origin OriginConfig - Retries uint64 - ConnectionTimeout time.Duration - CompressionQuality uint64 + TunnelHostname h2mux.TunnelHostname `json:"tunnel_hostname"` + OriginConfigUnmarshaler *OriginConfigUnmarshaler `json:"origin_config"` + Retries uint64 `json:"retries"` + ConnectionTimeout time.Duration `json:"connection_timeout"` + CompressionQuality uint64 `json:"compression_quality"` } func NewReverseProxyConfig( @@ -109,14 +110,14 @@ func NewReverseProxyConfig( compressionQuality uint64, ) (*ReverseProxyConfig, error) { if originConfig == nil { - return nil, fmt.Errorf("NewReverseProxyConfig: originConfig was null") + return nil, fmt.Errorf("NewReverseProxyConfig: originConfigUnmarshaler was null") } return &ReverseProxyConfig{ - TunnelHostname: h2mux.TunnelHostname(tunnelHostname), - Origin: originConfig, - Retries: retries, - ConnectionTimeout: connectionTimeout, - CompressionQuality: compressionQuality, + TunnelHostname: h2mux.TunnelHostname(tunnelHostname), + OriginConfigUnmarshaler: &OriginConfigUnmarshaler{originConfig}, + Retries: retries, + ConnectionTimeout: connectionTimeout, + CompressionQuality: compressionQuality, }, nil } @@ -133,19 +134,40 @@ type OriginConfig interface { isOriginConfig() } +type originType int + +const ( + httpType originType = iota + wsType + helloWorldType +) + +func (ot originType) String() string { + switch ot { + case httpType: + return "Http" + case wsType: + return "WebSocket" + case helloWorldType: + return "HelloWorld" + default: + return "unknown" + } +} + type HTTPOriginConfig struct { - URLString string `capnp:"urlString"` - TCPKeepAlive 
time.Duration `capnp:"tcpKeepAlive"` - DialDualStack bool - TLSHandshakeTimeout time.Duration `capnp:"tlsHandshakeTimeout"` - TLSVerify bool `capnp:"tlsVerify"` - OriginCAPool string - OriginServerName string - MaxIdleConnections uint64 - IdleConnectionTimeout time.Duration - ProxyConnectionTimeout time.Duration - ExpectContinueTimeout time.Duration - ChunkedEncoding bool + URLString string `capnp:"urlString" mapstructure:"url_string"` + TCPKeepAlive time.Duration `capnp:"tcpKeepAlive" mapstructure:"tcp_keep_alive"` + DialDualStack bool `mapstructure:"dial_dual_stack"` + TLSHandshakeTimeout time.Duration `capnp:"tlsHandshakeTimeout" mapstructure:"tls_handshake_timeout"` + TLSVerify bool `capnp:"tlsVerify" mapstructure:"tls_verify"` + OriginCAPool string `mapstructure:"origin_ca_pool"` + OriginServerName string `mapstructure:"origin_server_name"` + MaxIdleConnections uint64 `mapstructure:"max_idle_connections"` + IdleConnectionTimeout time.Duration `mapstructure:"idle_connection_timeout"` + ProxyConnectionTimeout time.Duration `mapstructure:"proxy_connection_timeout"` + ExpectContinueTimeout time.Duration `mapstructure:"expect_continue_timeout"` + ChunkedEncoding bool `mapstructure:"chunked_encoding"` } func (hc *HTTPOriginConfig) Service() (originservice.OriginService, error) { @@ -187,10 +209,10 @@ func (hc *HTTPOriginConfig) Service() (originservice.OriginService, error) { func (_ *HTTPOriginConfig) isOriginConfig() {} type WebSocketOriginConfig struct { - URLString string `capnp:"urlString"` - TLSVerify bool `capnp:"tlsVerify"` - OriginCAPool string - OriginServerName string + URLString string `capnp:"urlString" mapstructure:"url_string"` + TLSVerify bool `capnp:"tlsVerify" mapstructure:"tls_verify"` + OriginCAPool string `mapstructure:"origin_ca_pool"` + OriginServerName string `mapstructure:"origin_server_name"` } func (wsc *WebSocketOriginConfig) Service() (originservice.OriginService, error) { @@ -449,7 +471,7 @@ func UnmarshalDoHProxyConfig(s tunnelrpc.DoHProxyConfig) (*DoHProxyConfig, error func MarshalReverseProxyConfig(s tunnelrpc.ReverseProxyConfig, p *ReverseProxyConfig) error { s.SetTunnelHostname(p.TunnelHostname.String()) - switch config := p.Origin.(type) { + switch config := p.OriginConfigUnmarshaler.OriginConfig.(type) { case *HTTPOriginConfig: ss, err := s.Origin().NewHttp() if err != nil { @@ -500,7 +522,7 @@ func UnmarshalReverseProxyConfig(s tunnelrpc.ReverseProxyConfig) (*ReverseProxyC if err != nil { return nil, err } - p.Origin = config + p.OriginConfigUnmarshaler = &OriginConfigUnmarshaler{config} case tunnelrpc.ReverseProxyConfig_origin_Which_websocket: ss, err := s.Origin().Websocket() if err != nil { @@ -510,7 +532,7 @@ func UnmarshalReverseProxyConfig(s tunnelrpc.ReverseProxyConfig) (*ReverseProxyC if err != nil { return nil, err } - p.Origin = config + p.OriginConfigUnmarshaler = &OriginConfigUnmarshaler{config} case tunnelrpc.ReverseProxyConfig_origin_Which_helloWorld: ss, err := s.Origin().HelloWorld() if err != nil { @@ -520,7 +542,7 @@ func UnmarshalReverseProxyConfig(s tunnelrpc.ReverseProxyConfig) (*ReverseProxyC if err != nil { return nil, err } - p.Origin = config + p.OriginConfigUnmarshaler = &OriginConfigUnmarshaler{config} } p.Retries = s.Retries() p.ConnectionTimeout = time.Duration(s.ConnectionTimeout()) diff --git a/tunnelrpc/pogs/config_test.go b/tunnelrpc/pogs/config_test.go index 426d5342..8ede50f1 100644 --- a/tunnelrpc/pogs/config_test.go +++ b/tunnelrpc/pogs/config_test.go @@ -41,13 +41,13 @@ func TestClientConfig(t *testing.T) { 
sampleReverseProxyConfig(func(c *ReverseProxyConfig) { }), sampleReverseProxyConfig(func(c *ReverseProxyConfig) { - c.Origin = sampleHTTPOriginConfig() + c.OriginConfigUnmarshaler = &OriginConfigUnmarshaler{sampleHTTPOriginConfig()} }), sampleReverseProxyConfig(func(c *ReverseProxyConfig) { - c.Origin = sampleHTTPOriginConfig(unixPathOverride) + c.OriginConfigUnmarshaler = &OriginConfigUnmarshaler{sampleHTTPOriginConfigUnixPath()} }), sampleReverseProxyConfig(func(c *ReverseProxyConfig) { - c.Origin = sampleWebSocketOriginConfig() + c.OriginConfigUnmarshaler = &OriginConfigUnmarshaler{sampleWebSocketOriginConfig()} }), } } @@ -142,13 +142,13 @@ func TestReverseProxyConfig(t *testing.T) { testCases := []*ReverseProxyConfig{ sampleReverseProxyConfig(), sampleReverseProxyConfig(func(c *ReverseProxyConfig) { - c.Origin = sampleHTTPOriginConfig() + c.OriginConfigUnmarshaler = &OriginConfigUnmarshaler{sampleHTTPOriginConfig()} }), sampleReverseProxyConfig(func(c *ReverseProxyConfig) { - c.Origin = sampleHTTPOriginConfig(unixPathOverride) + c.OriginConfigUnmarshaler = &OriginConfigUnmarshaler{sampleHTTPOriginConfigUnixPath()} }), sampleReverseProxyConfig(func(c *ReverseProxyConfig) { - c.Origin = sampleWebSocketOriginConfig() + c.OriginConfigUnmarshaler = &OriginConfigUnmarshaler{sampleWebSocketOriginConfig()} }), } for i, testCase := range testCases { @@ -285,11 +285,11 @@ func sampleDoHProxyConfig(overrides ...func(*DoHProxyConfig)) *DoHProxyConfig { // applies any number of overrides to it, and returns it. func sampleReverseProxyConfig(overrides ...func(*ReverseProxyConfig)) *ReverseProxyConfig { sample := &ReverseProxyConfig{ - TunnelHostname: "hijk.example.com", - Origin: &HelloWorldOriginConfig{}, - Retries: 18, - ConnectionTimeout: 5 * time.Second, - CompressionQuality: 4, + TunnelHostname: "hijk.example.com", + OriginConfigUnmarshaler: &OriginConfigUnmarshaler{&HelloWorldOriginConfig{}}, + Retries: 18, + ConnectionTimeout: 5 * time.Second, + CompressionQuality: 4, } sample.ensureNoZeroFields() for _, f := range overrides { @@ -298,11 +298,9 @@ func sampleReverseProxyConfig(overrides ...func(*ReverseProxyConfig)) *ReversePr return sample } -// sampleHTTPOriginConfig initializes a new HTTPOriginConfig literal, -// applies any number of overrides to it, and returns it. func sampleHTTPOriginConfig(overrides ...func(*HTTPOriginConfig)) *HTTPOriginConfig { sample := &HTTPOriginConfig{ - URLString: "https://example.com", + URLString: "https.example.com", TCPKeepAlive: 7 * time.Second, DialDualStack: true, TLSHandshakeTimeout: 11 * time.Second, @@ -322,14 +320,28 @@ func sampleHTTPOriginConfig(overrides ...func(*HTTPOriginConfig)) *HTTPOriginCon return sample } -// unixPathOverride sets the URLString of the given HTTPOriginConfig to be a -// Unix socket (i.e. 
`unix:` scheme plus a file path) -func unixPathOverride(sample *HTTPOriginConfig) { - sample.URLString = "unix:/var/lib/file.sock" +func sampleHTTPOriginConfigUnixPath(overrides ...func(*HTTPOriginConfig)) *HTTPOriginConfig { + sample := &HTTPOriginConfig{ + URLString: "unix:/var/lib/file.sock", + TCPKeepAlive: 7 * time.Second, + DialDualStack: true, + TLSHandshakeTimeout: 11 * time.Second, + TLSVerify: true, + OriginCAPool: "/etc/cert.pem", + OriginServerName: "secure.example.com", + MaxIdleConnections: 19, + IdleConnectionTimeout: 17 * time.Second, + ProxyConnectionTimeout: 15 * time.Second, + ExpectContinueTimeout: 21 * time.Second, + ChunkedEncoding: true, + } + sample.ensureNoZeroFields() + for _, f := range overrides { + f(sample) + } + return sample } -// sampleWebSocketOriginConfig initializes a new WebSocketOriginConfig -// struct, applies any number of overrides to it, and returns it. func sampleWebSocketOriginConfig(overrides ...func(*WebSocketOriginConfig)) *WebSocketOriginConfig { sample := &WebSocketOriginConfig{ URLString: "ssh://example.com", diff --git a/tunnelrpc/pogs/unmarshal.go b/tunnelrpc/pogs/unmarshal.go index ef40ff0a..001c8c2a 100644 --- a/tunnelrpc/pogs/unmarshal.go +++ b/tunnelrpc/pogs/unmarshal.go @@ -4,6 +4,7 @@ import ( "encoding/json" "fmt" + "github.com/mitchellh/mapstructure" "github.com/pkg/errors" ) @@ -39,3 +40,43 @@ func (su *ScopeUnmarshaler) UnmarshalJSON(b []byte) error { return fmt.Errorf("JSON should have been an object with one root key, either 'system_name' or 'group'") } + +type OriginConfigUnmarshaler struct { + OriginConfig OriginConfig +} + +func (ocu *OriginConfigUnmarshaler) UnmarshalJSON(b []byte) error { + var originJSON map[string]interface{} + if err := json.Unmarshal(b, &originJSON); err != nil { + return errors.Wrapf(err, "cannot unmarshal %s into originJSON", string(b)) + } + + if originConfig, ok := originJSON[httpType.String()]; ok { + httpOriginConfig := &HTTPOriginConfig{} + if err := mapstructure.Decode(originConfig, httpOriginConfig); err != nil { + return errors.Wrapf(err, "cannot decode %+v into HTTPOriginConfig", originConfig) + } + ocu.OriginConfig = httpOriginConfig + return nil + } + + if originConfig, ok := originJSON[wsType.String()]; ok { + wsOriginConfig := &WebSocketOriginConfig{} + if err := mapstructure.Decode(originConfig, wsOriginConfig); err != nil { + return errors.Wrapf(err, "cannot decode %+v into WebSocketOriginConfig", originConfig) + } + ocu.OriginConfig = wsOriginConfig + return nil + } + + if originConfig, ok := originJSON[helloWorldType.String()]; ok { + helloWorldOriginConfig := &HelloWorldOriginConfig{} + if err := mapstructure.Decode(originConfig, helloWorldOriginConfig); err != nil { + return errors.Wrapf(err, "cannot decode %+v into HelloWorldOriginConfig", originConfig) + } + ocu.OriginConfig = helloWorldOriginConfig + return nil + } + + return fmt.Errorf("cannot unmarshal %s into OriginConfig", string(b)) +} \ No newline at end of file diff --git a/tunnelrpc/pogs/unmarshal_test.go b/tunnelrpc/pogs/unmarshal_test.go index 16f83eac..8e4a2981 100644 --- a/tunnelrpc/pogs/unmarshal_test.go +++ b/tunnelrpc/pogs/unmarshal_test.go @@ -1,6 +1,13 @@ package pogs -import "testing" +import ( + "encoding/json" + "strings" + "testing" + "time" + + "github.com/stretchr/testify/assert" +) func TestScopeUnmarshaler_UnmarshalJSON(t *testing.T) { type fields struct { @@ -55,6 +62,180 @@ func TestScopeUnmarshaler_UnmarshalJSON(t *testing.T) { } } +func TestUnmarshalOrigin(t *testing.T) { + tests := []struct { + 
jsonLiteral string + exceptedOriginConfig OriginConfig + }{ + { + jsonLiteral: `{ + "Http":{ + "url_string":"https://127.0.0.1:8080", + "tcp_keep_alive":30000000000, + "dial_dual_stack":true, + "tls_handshake_timeout":10000000000, + "tls_verify":true, + "origin_ca_pool":"", + "origin_server_name":"", + "max_idle_connections":100, + "idle_connection_timeout":90000000000, + "proxy_connection_timeout":90000000000, + "expect_continue_timeout":90000000000, + "chunked_encoding":true + } + }`, + exceptedOriginConfig: &HTTPOriginConfig{ + URLString: "https://127.0.0.1:8080", + TCPKeepAlive: time.Second * 30, + DialDualStack: true, + TLSHandshakeTimeout: time.Second * 10, + TLSVerify: true, + OriginCAPool: "", + OriginServerName: "", + MaxIdleConnections: 100, + IdleConnectionTimeout: time.Second * 90, + ProxyConnectionTimeout: time.Second * 90, + ExpectContinueTimeout: time.Second * 90, + ChunkedEncoding: true, + }, + }, + { + jsonLiteral: `{ + "WebSocket":{ + "url_string":"https://127.0.0.1:9090", + "tls_verify":true, + "origin_ca_pool":"", + "origin_server_name":"ws.example.com" + } + }`, + exceptedOriginConfig: &WebSocketOriginConfig{ + URLString: "https://127.0.0.1:9090", + TLSVerify: true, + OriginCAPool: "", + OriginServerName: "ws.example.com", + }, + }, + { + jsonLiteral: `{ + "HelloWorld": {} + }`, + exceptedOriginConfig: &HelloWorldOriginConfig{}, + }, + } + + for _, test := range tests { + originConfigJSON := strings.ReplaceAll(strings.ReplaceAll(test.jsonLiteral, "\n", ""), "\t", "") + var OriginConfigUnmarshaler OriginConfigUnmarshaler + err := json.Unmarshal([]byte(originConfigJSON), &OriginConfigUnmarshaler) + assert.NoError(t, err) + assert.Equal(t, test.exceptedOriginConfig, OriginConfigUnmarshaler.OriginConfig) + } +} + +func TestUnmarshalClientConfig(t *testing.T) { + prettyClientConfigJSON := `{ + "version":10, + "supervisor_config":{ + "auto_update_frequency":86400000000000, + "metrics_update_frequency":300000000000, + "grace_period":30000000000 + }, + "edge_connection_config":{ + "num_ha_connections":4, + "heartbeat_interval":5000000000, + "timeout":30000000000, + "max_failed_heartbeats":5, + "user_credential_path":"~/.cloudflared/cert.pem" + }, + "doh_proxy_configs":[{ + "listen_host": "localhost", + "listen_port": 53, + "upstreams": ["https://1.1.1.1/dns-query", "https://1.0.0.1/dns-query"] + }], + "reverse_proxy_configs":[{ + "tunnel_hostname":"sdfjadk33.cftunnel.com", + "origin_config":{ + "Http":{ + "url_string":"https://127.0.0.1:8080", + "tcp_keep_alive":30000000000, + "dial_dual_stack":true, + "tls_handshake_timeout":10000000000, + "tls_verify":true, + "origin_ca_pool":"", + "origin_server_name":"", + "max_idle_connections":100, + "idle_connection_timeout":90000000000, + "proxy_connection_timeout":90000000000, + "expect_continue_timeout":90000000000, + "chunked_encoding":true + } + }, + "retries":5, + "connection_timeout":30, + "compression_quality":0 + }] + }` + // replace new line and tab + clientConfigJSON := strings.ReplaceAll(strings.ReplaceAll(prettyClientConfigJSON, "\n", ""), "\t", "") + + var clientConfig ClientConfig + err := json.Unmarshal([]byte(clientConfigJSON), &clientConfig) + assert.NoError(t, err) + + assert.Equal(t, Version(10), clientConfig.Version) + + supervisorConfig := SupervisorConfig{ + AutoUpdateFrequency: time.Hour * 24, + MetricsUpdateFrequency: time.Second * 300, + GracePeriod: time.Second * 30, + } + assert.Equal(t, supervisorConfig, *clientConfig.SupervisorConfig) + + edgeConnectionConfig := EdgeConnectionConfig{ + NumHAConnections: 4, 
+ HeartbeatInterval: time.Second * 5, + Timeout: time.Second * 30, + MaxFailedHeartbeats: 5, + UserCredentialPath: "~/.cloudflared/cert.pem", + } + assert.Equal(t, edgeConnectionConfig, *clientConfig.EdgeConnectionConfig) + + dohProxyConfig := DoHProxyConfig{ + ListenHost: "localhost", + ListenPort: 53, + Upstreams: []string{"https://1.1.1.1/dns-query", "https://1.0.0.1/dns-query"}, + } + + assert.Len(t, clientConfig.DoHProxyConfigs, 1) + assert.Equal(t, dohProxyConfig, *clientConfig.DoHProxyConfigs[0]) + + reverseProxyConfig := ReverseProxyConfig{ + TunnelHostname: "sdfjadk33.cftunnel.com", + OriginConfigUnmarshaler: &OriginConfigUnmarshaler{ + OriginConfig: &HTTPOriginConfig{ + URLString: "https://127.0.0.1:8080", + TCPKeepAlive: time.Second * 30, + DialDualStack: true, + TLSHandshakeTimeout: time.Second * 10, + TLSVerify: true, + OriginCAPool: "", + OriginServerName: "", + MaxIdleConnections: 100, + IdleConnectionTimeout: time.Second * 90, + ProxyConnectionTimeout: time.Second * 90, + ExpectContinueTimeout: time.Second * 90, + ChunkedEncoding: true, + }, + }, + Retries: 5, + ConnectionTimeout: 30, + CompressionQuality: 0, + } + + assert.Len(t, clientConfig.ReverseProxyConfigs, 1) + assert.Equal(t, reverseProxyConfig, *clientConfig.ReverseProxyConfigs[0]) +} + func eqScope(s1, s2 Scope) bool { return s1.Value() == s2.Value() && s1.PostgresType() == s2.PostgresType() } diff --git a/vendor/github.com/coreos/go-oidc/http/client.go b/vendor/github.com/coreos/go-oidc/http/client.go deleted file mode 100644 index fd079b49..00000000 --- a/vendor/github.com/coreos/go-oidc/http/client.go +++ /dev/null @@ -1,7 +0,0 @@ -package http - -import "net/http" - -type Client interface { - Do(*http.Request) (*http.Response, error) -} diff --git a/vendor/github.com/coreos/go-oidc/http/doc.go b/vendor/github.com/coreos/go-oidc/http/doc.go deleted file mode 100644 index 5687e8b8..00000000 --- a/vendor/github.com/coreos/go-oidc/http/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package http is DEPRECATED. Use net/http instead. -package http diff --git a/vendor/github.com/coreos/go-oidc/http/http.go b/vendor/github.com/coreos/go-oidc/http/http.go deleted file mode 100644 index 48717833..00000000 --- a/vendor/github.com/coreos/go-oidc/http/http.go +++ /dev/null @@ -1,161 +0,0 @@ -package http - -import ( - "encoding/base64" - "encoding/json" - "errors" - "log" - "net/http" - "net/url" - "path" - "strconv" - "strings" - "time" -) - -func WriteError(w http.ResponseWriter, code int, msg string) { - e := struct { - Error string `json:"error"` - }{ - Error: msg, - } - b, err := json.Marshal(e) - if err != nil { - log.Printf("go-oidc: failed to marshal %#v: %v", e, err) - code = http.StatusInternalServerError - b = []byte(`{"error":"server_error"}`) - } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(code) - w.Write(b) -} - -// BasicAuth parses a username and password from the request's -// Authorization header. 
This was pulled from golang master: -// https://codereview.appspot.com/76540043 -func BasicAuth(r *http.Request) (username, password string, ok bool) { - auth := r.Header.Get("Authorization") - if auth == "" { - return - } - - if !strings.HasPrefix(auth, "Basic ") { - return - } - c, err := base64.StdEncoding.DecodeString(strings.TrimPrefix(auth, "Basic ")) - if err != nil { - return - } - cs := string(c) - s := strings.IndexByte(cs, ':') - if s < 0 { - return - } - return cs[:s], cs[s+1:], true -} - -func cacheControlMaxAge(hdr string) (time.Duration, bool, error) { - for _, field := range strings.Split(hdr, ",") { - parts := strings.SplitN(strings.TrimSpace(field), "=", 2) - k := strings.ToLower(strings.TrimSpace(parts[0])) - if k != "max-age" { - continue - } - - if len(parts) == 1 { - return 0, false, errors.New("max-age has no value") - } - - v := strings.TrimSpace(parts[1]) - if v == "" { - return 0, false, errors.New("max-age has empty value") - } - - age, err := strconv.Atoi(v) - if err != nil { - return 0, false, err - } - - if age <= 0 { - return 0, false, nil - } - - return time.Duration(age) * time.Second, true, nil - } - - return 0, false, nil -} - -func expires(date, expires string) (time.Duration, bool, error) { - if date == "" || expires == "" { - return 0, false, nil - } - - var te time.Time - var err error - if expires == "0" { - return 0, false, nil - } - te, err = time.Parse(time.RFC1123, expires) - if err != nil { - return 0, false, err - } - - td, err := time.Parse(time.RFC1123, date) - if err != nil { - return 0, false, err - } - - ttl := te.Sub(td) - - // headers indicate data already expired, caller should not - // have to care about this case - if ttl <= 0 { - return 0, false, nil - } - - return ttl, true, nil -} - -func Cacheable(hdr http.Header) (time.Duration, bool, error) { - ttl, ok, err := cacheControlMaxAge(hdr.Get("Cache-Control")) - if err != nil || ok { - return ttl, ok, err - } - - return expires(hdr.Get("Date"), hdr.Get("Expires")) -} - -// MergeQuery appends additional query values to an existing URL. -func MergeQuery(u url.URL, q url.Values) url.URL { - uv := u.Query() - for k, vs := range q { - for _, v := range vs { - uv.Add(k, v) - } - } - u.RawQuery = uv.Encode() - return u -} - -// NewResourceLocation appends a resource id to the end of the requested URL path. -func NewResourceLocation(reqURL *url.URL, id string) string { - var u url.URL - u = *reqURL - u.Path = path.Join(u.Path, id) - u.RawQuery = "" - u.Fragment = "" - return u.String() -} - -// CopyRequest returns a clone of the provided *http.Request. -// The returned object is a shallow copy of the struct and a -// deep copy of its Header field. -func CopyRequest(r *http.Request) *http.Request { - r2 := *r - r2.Header = make(http.Header) - for k, s := range r.Header { - r2.Header[k] = s - } - return &r2 -} diff --git a/vendor/github.com/coreos/go-oidc/http/url.go b/vendor/github.com/coreos/go-oidc/http/url.go deleted file mode 100644 index df60eb1a..00000000 --- a/vendor/github.com/coreos/go-oidc/http/url.go +++ /dev/null @@ -1,29 +0,0 @@ -package http - -import ( - "errors" - "net/url" -) - -// ParseNonEmptyURL checks that a string is a parsable URL which is also not empty -// since `url.Parse("")` does not return an error. Must contian a scheme and a host. 
-func ParseNonEmptyURL(u string) (*url.URL, error) { - if u == "" { - return nil, errors.New("url is empty") - } - - ur, err := url.Parse(u) - if err != nil { - return nil, err - } - - if ur.Scheme == "" { - return nil, errors.New("url scheme is empty") - } - - if ur.Host == "" { - return nil, errors.New("url host is empty") - } - - return ur, nil -} diff --git a/vendor/github.com/coreos/go-oidc/key/doc.go b/vendor/github.com/coreos/go-oidc/key/doc.go deleted file mode 100644 index 936eec74..00000000 --- a/vendor/github.com/coreos/go-oidc/key/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package key is DEPRECATED. Use github.com/coreos/go-oidc instead. -package key diff --git a/vendor/github.com/coreos/go-oidc/key/key.go b/vendor/github.com/coreos/go-oidc/key/key.go deleted file mode 100644 index 208c1fc1..00000000 --- a/vendor/github.com/coreos/go-oidc/key/key.go +++ /dev/null @@ -1,153 +0,0 @@ -package key - -import ( - "crypto/rand" - "crypto/rsa" - "encoding/hex" - "encoding/json" - "io" - "time" - - "github.com/coreos/go-oidc/jose" -) - -func NewPublicKey(jwk jose.JWK) *PublicKey { - return &PublicKey{jwk: jwk} -} - -type PublicKey struct { - jwk jose.JWK -} - -func (k *PublicKey) MarshalJSON() ([]byte, error) { - return json.Marshal(&k.jwk) -} - -func (k *PublicKey) UnmarshalJSON(data []byte) error { - var jwk jose.JWK - if err := json.Unmarshal(data, &jwk); err != nil { - return err - } - k.jwk = jwk - return nil -} - -func (k *PublicKey) ID() string { - return k.jwk.ID -} - -func (k *PublicKey) Verifier() (jose.Verifier, error) { - return jose.NewVerifierRSA(k.jwk) -} - -type PrivateKey struct { - KeyID string - PrivateKey *rsa.PrivateKey -} - -func (k *PrivateKey) ID() string { - return k.KeyID -} - -func (k *PrivateKey) Signer() jose.Signer { - return jose.NewSignerRSA(k.ID(), *k.PrivateKey) -} - -func (k *PrivateKey) JWK() jose.JWK { - return jose.JWK{ - ID: k.KeyID, - Type: "RSA", - Alg: "RS256", - Use: "sig", - Exponent: k.PrivateKey.PublicKey.E, - Modulus: k.PrivateKey.PublicKey.N, - } -} - -type KeySet interface { - ExpiresAt() time.Time -} - -type PublicKeySet struct { - keys []PublicKey - index map[string]*PublicKey - expiresAt time.Time -} - -func NewPublicKeySet(jwks []jose.JWK, exp time.Time) *PublicKeySet { - keys := make([]PublicKey, len(jwks)) - index := make(map[string]*PublicKey) - for i, jwk := range jwks { - keys[i] = *NewPublicKey(jwk) - index[keys[i].ID()] = &keys[i] - } - return &PublicKeySet{ - keys: keys, - index: index, - expiresAt: exp, - } -} - -func (s *PublicKeySet) ExpiresAt() time.Time { - return s.expiresAt -} - -func (s *PublicKeySet) Keys() []PublicKey { - return s.keys -} - -func (s *PublicKeySet) Key(id string) *PublicKey { - return s.index[id] -} - -type PrivateKeySet struct { - keys []*PrivateKey - ActiveKeyID string - expiresAt time.Time -} - -func NewPrivateKeySet(keys []*PrivateKey, exp time.Time) *PrivateKeySet { - return &PrivateKeySet{ - keys: keys, - ActiveKeyID: keys[0].ID(), - expiresAt: exp.UTC(), - } -} - -func (s *PrivateKeySet) Keys() []*PrivateKey { - return s.keys -} - -func (s *PrivateKeySet) ExpiresAt() time.Time { - return s.expiresAt -} - -func (s *PrivateKeySet) Active() *PrivateKey { - for i, k := range s.keys { - if k.ID() == s.ActiveKeyID { - return s.keys[i] - } - } - - return nil -} - -type GeneratePrivateKeyFunc func() (*PrivateKey, error) - -func GeneratePrivateKey() (*PrivateKey, error) { - pk, err := rsa.GenerateKey(rand.Reader, 2048) - if err != nil { - return nil, err - } - keyID := make([]byte, 20) - if _, err := 
io.ReadFull(rand.Reader, keyID); err != nil { - return nil, err - } - - k := PrivateKey{ - KeyID: hex.EncodeToString(keyID), - PrivateKey: pk, - } - - return &k, nil -} diff --git a/vendor/github.com/coreos/go-oidc/key/manager.go b/vendor/github.com/coreos/go-oidc/key/manager.go deleted file mode 100644 index 476ab6a8..00000000 --- a/vendor/github.com/coreos/go-oidc/key/manager.go +++ /dev/null @@ -1,99 +0,0 @@ -package key - -import ( - "errors" - "time" - - "github.com/jonboulle/clockwork" - - "github.com/coreos/go-oidc/jose" - "github.com/coreos/pkg/health" -) - -type PrivateKeyManager interface { - ExpiresAt() time.Time - Signer() (jose.Signer, error) - JWKs() ([]jose.JWK, error) - PublicKeys() ([]PublicKey, error) - - WritableKeySetRepo - health.Checkable -} - -func NewPrivateKeyManager() PrivateKeyManager { - return &privateKeyManager{ - clock: clockwork.NewRealClock(), - } -} - -type privateKeyManager struct { - keySet *PrivateKeySet - clock clockwork.Clock -} - -func (m *privateKeyManager) ExpiresAt() time.Time { - if m.keySet == nil { - return m.clock.Now().UTC() - } - - return m.keySet.ExpiresAt() -} - -func (m *privateKeyManager) Signer() (jose.Signer, error) { - if err := m.Healthy(); err != nil { - return nil, err - } - - return m.keySet.Active().Signer(), nil -} - -func (m *privateKeyManager) JWKs() ([]jose.JWK, error) { - if err := m.Healthy(); err != nil { - return nil, err - } - - keys := m.keySet.Keys() - jwks := make([]jose.JWK, len(keys)) - for i, k := range keys { - jwks[i] = k.JWK() - } - return jwks, nil -} - -func (m *privateKeyManager) PublicKeys() ([]PublicKey, error) { - jwks, err := m.JWKs() - if err != nil { - return nil, err - } - keys := make([]PublicKey, len(jwks)) - for i, jwk := range jwks { - keys[i] = *NewPublicKey(jwk) - } - return keys, nil -} - -func (m *privateKeyManager) Healthy() error { - if m.keySet == nil { - return errors.New("private key manager uninitialized") - } - - if len(m.keySet.Keys()) == 0 { - return errors.New("private key manager zero keys") - } - - if m.keySet.ExpiresAt().Before(m.clock.Now().UTC()) { - return errors.New("private key manager keys expired") - } - - return nil -} - -func (m *privateKeyManager) Set(keySet KeySet) error { - privKeySet, ok := keySet.(*PrivateKeySet) - if !ok { - return errors.New("unable to cast to PrivateKeySet") - } - - m.keySet = privKeySet - return nil -} diff --git a/vendor/github.com/coreos/go-oidc/key/repo.go b/vendor/github.com/coreos/go-oidc/key/repo.go deleted file mode 100644 index 1acdeb36..00000000 --- a/vendor/github.com/coreos/go-oidc/key/repo.go +++ /dev/null @@ -1,55 +0,0 @@ -package key - -import ( - "errors" - "sync" -) - -var ErrorNoKeys = errors.New("no keys found") - -type WritableKeySetRepo interface { - Set(KeySet) error -} - -type ReadableKeySetRepo interface { - Get() (KeySet, error) -} - -type PrivateKeySetRepo interface { - WritableKeySetRepo - ReadableKeySetRepo -} - -func NewPrivateKeySetRepo() PrivateKeySetRepo { - return &memPrivateKeySetRepo{} -} - -type memPrivateKeySetRepo struct { - mu sync.RWMutex - pks PrivateKeySet -} - -func (r *memPrivateKeySetRepo) Set(ks KeySet) error { - pks, ok := ks.(*PrivateKeySet) - if !ok { - return errors.New("unable to cast to PrivateKeySet") - } else if pks == nil { - return errors.New("nil KeySet") - } - - r.mu.Lock() - defer r.mu.Unlock() - - r.pks = *pks - return nil -} - -func (r *memPrivateKeySetRepo) Get() (KeySet, error) { - r.mu.RLock() - defer r.mu.RUnlock() - - if r.pks.keys == nil { - return nil, ErrorNoKeys - } - return 
KeySet(&r.pks), nil -} diff --git a/vendor/github.com/coreos/go-oidc/key/rotate.go b/vendor/github.com/coreos/go-oidc/key/rotate.go deleted file mode 100644 index bc6cdfb1..00000000 --- a/vendor/github.com/coreos/go-oidc/key/rotate.go +++ /dev/null @@ -1,159 +0,0 @@ -package key - -import ( - "errors" - "log" - "time" - - ptime "github.com/coreos/pkg/timeutil" - "github.com/jonboulle/clockwork" -) - -var ( - ErrorPrivateKeysExpired = errors.New("private keys have expired") -) - -func NewPrivateKeyRotator(repo PrivateKeySetRepo, ttl time.Duration) *PrivateKeyRotator { - return &PrivateKeyRotator{ - repo: repo, - ttl: ttl, - - keep: 2, - generateKey: GeneratePrivateKey, - clock: clockwork.NewRealClock(), - } -} - -type PrivateKeyRotator struct { - repo PrivateKeySetRepo - generateKey GeneratePrivateKeyFunc - clock clockwork.Clock - keep int - ttl time.Duration -} - -func (r *PrivateKeyRotator) expiresAt() time.Time { - return r.clock.Now().UTC().Add(r.ttl) -} - -func (r *PrivateKeyRotator) Healthy() error { - pks, err := r.privateKeySet() - if err != nil { - return err - } - - if r.clock.Now().After(pks.ExpiresAt()) { - return ErrorPrivateKeysExpired - } - - return nil -} - -func (r *PrivateKeyRotator) privateKeySet() (*PrivateKeySet, error) { - ks, err := r.repo.Get() - if err != nil { - return nil, err - } - - pks, ok := ks.(*PrivateKeySet) - if !ok { - return nil, errors.New("unable to cast to PrivateKeySet") - } - return pks, nil -} - -func (r *PrivateKeyRotator) nextRotation() (time.Duration, error) { - pks, err := r.privateKeySet() - if err == ErrorNoKeys { - return 0, nil - } - if err != nil { - return 0, err - } - - now := r.clock.Now() - - // Ideally, we want to rotate after half the TTL has elapsed. - idealRotationTime := pks.ExpiresAt().Add(-r.ttl / 2) - - // If we are past the ideal rotation time, rotate immediatly. - return max(0, idealRotationTime.Sub(now)), nil -} - -func max(a, b time.Duration) time.Duration { - if a > b { - return a - } - return b -} - -func (r *PrivateKeyRotator) Run() chan struct{} { - attempt := func() { - k, err := r.generateKey() - if err != nil { - log.Printf("go-oidc: failed generating signing key: %v", err) - return - } - - exp := r.expiresAt() - if err := rotatePrivateKeys(r.repo, k, r.keep, exp); err != nil { - log.Printf("go-oidc: key rotation failed: %v", err) - return - } - } - - stop := make(chan struct{}) - go func() { - for { - var nextRotation time.Duration - var sleep time.Duration - var err error - for { - if nextRotation, err = r.nextRotation(); err == nil { - break - } - sleep = ptime.ExpBackoff(sleep, time.Minute) - log.Printf("go-oidc: error getting nextRotation, retrying in %v: %v", sleep, err) - time.Sleep(sleep) - } - - select { - case <-r.clock.After(nextRotation): - attempt() - case <-stop: - return - } - } - }() - - return stop -} - -func rotatePrivateKeys(repo PrivateKeySetRepo, k *PrivateKey, keep int, exp time.Time) error { - ks, err := repo.Get() - if err != nil && err != ErrorNoKeys { - return err - } - - var keys []*PrivateKey - if ks != nil { - pks, ok := ks.(*PrivateKeySet) - if !ok { - return errors.New("unable to cast to PrivateKeySet") - } - keys = pks.Keys() - } - - keys = append([]*PrivateKey{k}, keys...) 
- if l := len(keys); l > keep { - keys = keys[0:keep] - } - - nks := PrivateKeySet{ - keys: keys, - ActiveKeyID: k.ID(), - expiresAt: exp, - } - - return repo.Set(KeySet(&nks)) -} diff --git a/vendor/github.com/coreos/go-oidc/key/sync.go b/vendor/github.com/coreos/go-oidc/key/sync.go deleted file mode 100644 index b887f7b5..00000000 --- a/vendor/github.com/coreos/go-oidc/key/sync.go +++ /dev/null @@ -1,91 +0,0 @@ -package key - -import ( - "errors" - "log" - "time" - - "github.com/jonboulle/clockwork" - - "github.com/coreos/pkg/timeutil" -) - -func NewKeySetSyncer(r ReadableKeySetRepo, w WritableKeySetRepo) *KeySetSyncer { - return &KeySetSyncer{ - readable: r, - writable: w, - clock: clockwork.NewRealClock(), - } -} - -type KeySetSyncer struct { - readable ReadableKeySetRepo - writable WritableKeySetRepo - clock clockwork.Clock -} - -func (s *KeySetSyncer) Run() chan struct{} { - stop := make(chan struct{}) - go func() { - var failing bool - var next time.Duration - for { - exp, err := syncKeySet(s.readable, s.writable, s.clock) - if err != nil || exp == 0 { - if !failing { - failing = true - next = time.Second - } else { - next = timeutil.ExpBackoff(next, time.Minute) - } - if exp == 0 { - log.Printf("Synced to already expired key set, retrying in %v: %v", next, err) - - } else { - log.Printf("Failed syncing key set, retrying in %v: %v", next, err) - } - } else { - failing = false - next = exp / 2 - } - - select { - case <-s.clock.After(next): - continue - case <-stop: - return - } - } - }() - - return stop -} - -func Sync(r ReadableKeySetRepo, w WritableKeySetRepo) (time.Duration, error) { - return syncKeySet(r, w, clockwork.NewRealClock()) -} - -// syncKeySet copies the keyset from r to the KeySet at w and returns the duration in which the KeySet will expire. -// If keyset has already expired, returns a zero duration. -func syncKeySet(r ReadableKeySetRepo, w WritableKeySetRepo, clock clockwork.Clock) (exp time.Duration, err error) { - var ks KeySet - ks, err = r.Get() - if err != nil { - return - } - - if ks == nil { - err = errors.New("no source KeySet") - return - } - - if err = w.Set(ks); err != nil { - return - } - - now := clock.Now() - if ks.ExpiresAt().After(now) { - exp = ks.ExpiresAt().Sub(now) - } - return -} diff --git a/vendor/github.com/coreos/go-oidc/oauth2/doc.go b/vendor/github.com/coreos/go-oidc/oauth2/doc.go deleted file mode 100644 index 52eb3085..00000000 --- a/vendor/github.com/coreos/go-oidc/oauth2/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package oauth2 is DEPRECATED. Use golang.org/x/oauth instead. 
-package oauth2 diff --git a/vendor/github.com/coreos/go-oidc/oauth2/error.go b/vendor/github.com/coreos/go-oidc/oauth2/error.go deleted file mode 100644 index 50d89094..00000000 --- a/vendor/github.com/coreos/go-oidc/oauth2/error.go +++ /dev/null @@ -1,29 +0,0 @@ -package oauth2 - -const ( - ErrorAccessDenied = "access_denied" - ErrorInvalidClient = "invalid_client" - ErrorInvalidGrant = "invalid_grant" - ErrorInvalidRequest = "invalid_request" - ErrorServerError = "server_error" - ErrorUnauthorizedClient = "unauthorized_client" - ErrorUnsupportedGrantType = "unsupported_grant_type" - ErrorUnsupportedResponseType = "unsupported_response_type" -) - -type Error struct { - Type string `json:"error"` - Description string `json:"error_description,omitempty"` - State string `json:"state,omitempty"` -} - -func (e *Error) Error() string { - if e.Description != "" { - return e.Type + ": " + e.Description - } - return e.Type -} - -func NewError(typ string) *Error { - return &Error{Type: typ} -} diff --git a/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go b/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go deleted file mode 100644 index 72d1d671..00000000 --- a/vendor/github.com/coreos/go-oidc/oauth2/oauth2.go +++ /dev/null @@ -1,416 +0,0 @@ -package oauth2 - -import ( - "encoding/json" - "errors" - "fmt" - "io/ioutil" - "mime" - "net/http" - "net/url" - "sort" - "strconv" - "strings" - - phttp "github.com/coreos/go-oidc/http" -) - -// ResponseTypesEqual compares two response_type values. If either -// contains a space, it is treated as an unordered list. For example, -// comparing "code id_token" and "id_token code" would evaluate to true. -func ResponseTypesEqual(r1, r2 string) bool { - if !strings.Contains(r1, " ") || !strings.Contains(r2, " ") { - // fast route, no split needed - return r1 == r2 - } - - // split, sort, and compare - r1Fields := strings.Fields(r1) - r2Fields := strings.Fields(r2) - if len(r1Fields) != len(r2Fields) { - return false - } - sort.Strings(r1Fields) - sort.Strings(r2Fields) - for i, r1Field := range r1Fields { - if r1Field != r2Fields[i] { - return false - } - } - return true -} - -const ( - // OAuth2.0 response types registered by OIDC. - // - // See: https://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#RegistryContents - ResponseTypeCode = "code" - ResponseTypeCodeIDToken = "code id_token" - ResponseTypeCodeIDTokenToken = "code id_token token" - ResponseTypeIDToken = "id_token" - ResponseTypeIDTokenToken = "id_token token" - ResponseTypeToken = "token" - ResponseTypeNone = "none" -) - -const ( - GrantTypeAuthCode = "authorization_code" - GrantTypeClientCreds = "client_credentials" - GrantTypeUserCreds = "password" - GrantTypeImplicit = "implicit" - GrantTypeRefreshToken = "refresh_token" - - AuthMethodClientSecretPost = "client_secret_post" - AuthMethodClientSecretBasic = "client_secret_basic" - AuthMethodClientSecretJWT = "client_secret_jwt" - AuthMethodPrivateKeyJWT = "private_key_jwt" -) - -type Config struct { - Credentials ClientCredentials - Scope []string - RedirectURL string - AuthURL string - TokenURL string - - // Must be one of the AuthMethodXXX methods above. Right now, only - // AuthMethodClientSecretPost and AuthMethodClientSecretBasic are supported. 
- AuthMethod string -} - -type Client struct { - hc phttp.Client - creds ClientCredentials - scope []string - authURL *url.URL - redirectURL *url.URL - tokenURL *url.URL - authMethod string -} - -type ClientCredentials struct { - ID string - Secret string -} - -func NewClient(hc phttp.Client, cfg Config) (c *Client, err error) { - if len(cfg.Credentials.ID) == 0 { - err = errors.New("missing client id") - return - } - - if len(cfg.Credentials.Secret) == 0 { - err = errors.New("missing client secret") - return - } - - if cfg.AuthMethod == "" { - cfg.AuthMethod = AuthMethodClientSecretBasic - } else if cfg.AuthMethod != AuthMethodClientSecretPost && cfg.AuthMethod != AuthMethodClientSecretBasic { - err = fmt.Errorf("auth method %q is not supported", cfg.AuthMethod) - return - } - - au, err := phttp.ParseNonEmptyURL(cfg.AuthURL) - if err != nil { - return - } - - tu, err := phttp.ParseNonEmptyURL(cfg.TokenURL) - if err != nil { - return - } - - // Allow empty redirect URL in the case where the client - // only needs to verify a given token. - ru, err := url.Parse(cfg.RedirectURL) - if err != nil { - return - } - - c = &Client{ - creds: cfg.Credentials, - scope: cfg.Scope, - redirectURL: ru, - authURL: au, - tokenURL: tu, - hc: hc, - authMethod: cfg.AuthMethod, - } - - return -} - -// Return the embedded HTTP client -func (c *Client) HttpClient() phttp.Client { - return c.hc -} - -// Generate the url for initial redirect to oauth provider. -func (c *Client) AuthCodeURL(state, accessType, prompt string) string { - v := c.commonURLValues() - v.Set("state", state) - if strings.ToLower(accessType) == "offline" { - v.Set("access_type", "offline") - } - - if prompt != "" { - v.Set("prompt", prompt) - } - v.Set("response_type", "code") - - q := v.Encode() - u := *c.authURL - if u.RawQuery == "" { - u.RawQuery = q - } else { - u.RawQuery += "&" + q - } - return u.String() -} - -func (c *Client) commonURLValues() url.Values { - return url.Values{ - "redirect_uri": {c.redirectURL.String()}, - "scope": {strings.Join(c.scope, " ")}, - "client_id": {c.creds.ID}, - } -} - -func (c *Client) newAuthenticatedRequest(urlToken string, values url.Values) (*http.Request, error) { - var req *http.Request - var err error - switch c.authMethod { - case AuthMethodClientSecretPost: - values.Set("client_secret", c.creds.Secret) - req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode())) - if err != nil { - return nil, err - } - case AuthMethodClientSecretBasic: - req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode())) - if err != nil { - return nil, err - } - encodedID := url.QueryEscape(c.creds.ID) - encodedSecret := url.QueryEscape(c.creds.Secret) - req.SetBasicAuth(encodedID, encodedSecret) - default: - panic("misconfigured client: auth method not supported") - } - - req.Header.Set("Content-Type", "application/x-www-form-urlencoded") - return req, nil - -} - -// ClientCredsToken posts the client id and secret to obtain a token scoped to the OAuth2 client via the "client_credentials" grant type. -// May not be supported by all OAuth2 servers. 
-func (c *Client) ClientCredsToken(scope []string) (result TokenResponse, err error) { - v := url.Values{ - "scope": {strings.Join(scope, " ")}, - "grant_type": {GrantTypeClientCreds}, - } - - req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) - if err != nil { - return - } - - resp, err := c.hc.Do(req) - if err != nil { - return - } - defer resp.Body.Close() - - return parseTokenResponse(resp) -} - -// UserCredsToken posts the username and password to obtain a token scoped to the OAuth2 client via the "password" grant_type -// May not be supported by all OAuth2 servers. -func (c *Client) UserCredsToken(username, password string) (result TokenResponse, err error) { - v := url.Values{ - "scope": {strings.Join(c.scope, " ")}, - "grant_type": {GrantTypeUserCreds}, - "username": {username}, - "password": {password}, - } - - req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) - if err != nil { - return - } - - resp, err := c.hc.Do(req) - if err != nil { - return - } - defer resp.Body.Close() - - return parseTokenResponse(resp) -} - -// RequestToken requests a token from the Token Endpoint with the specified grantType. -// If 'grantType' == GrantTypeAuthCode, then 'value' should be the authorization code. -// If 'grantType' == GrantTypeRefreshToken, then 'value' should be the refresh token. -func (c *Client) RequestToken(grantType, value string) (result TokenResponse, err error) { - v := c.commonURLValues() - - v.Set("grant_type", grantType) - v.Set("client_secret", c.creds.Secret) - switch grantType { - case GrantTypeAuthCode: - v.Set("code", value) - case GrantTypeRefreshToken: - v.Set("refresh_token", value) - default: - err = fmt.Errorf("unsupported grant_type: %v", grantType) - return - } - - req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v) - if err != nil { - return - } - - resp, err := c.hc.Do(req) - if err != nil { - return - } - defer resp.Body.Close() - - return parseTokenResponse(resp) -} - -func parseTokenResponse(resp *http.Response) (result TokenResponse, err error) { - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - return - } - badStatusCode := resp.StatusCode < 200 || resp.StatusCode > 299 - - contentType, _, err := mime.ParseMediaType(resp.Header.Get("Content-Type")) - if err != nil { - return - } - - result = TokenResponse{ - RawBody: body, - } - - newError := func(typ, desc, state string) error { - if typ == "" { - return fmt.Errorf("unrecognized error %s", body) - } - return &Error{typ, desc, state} - } - - if contentType == "application/x-www-form-urlencoded" || contentType == "text/plain" { - var vals url.Values - vals, err = url.ParseQuery(string(body)) - if err != nil { - return - } - if error := vals.Get("error"); error != "" || badStatusCode { - err = newError(error, vals.Get("error_description"), vals.Get("state")) - return - } - e := vals.Get("expires_in") - if e == "" { - e = vals.Get("expires") - } - if e != "" { - result.Expires, err = strconv.Atoi(e) - if err != nil { - return - } - } - result.AccessToken = vals.Get("access_token") - result.TokenType = vals.Get("token_type") - result.IDToken = vals.Get("id_token") - result.RefreshToken = vals.Get("refresh_token") - result.Scope = vals.Get("scope") - } else { - var r struct { - AccessToken string `json:"access_token"` - TokenType string `json:"token_type"` - IDToken string `json:"id_token"` - RefreshToken string `json:"refresh_token"` - Scope string `json:"scope"` - State string `json:"state"` - ExpiresIn json.Number `json:"expires_in"` // Azure AD returns string - 
Expires int `json:"expires"` - Error string `json:"error"` - Desc string `json:"error_description"` - } - if err = json.Unmarshal(body, &r); err != nil { - return - } - if r.Error != "" || badStatusCode { - err = newError(r.Error, r.Desc, r.State) - return - } - result.AccessToken = r.AccessToken - result.TokenType = r.TokenType - result.IDToken = r.IDToken - result.RefreshToken = r.RefreshToken - result.Scope = r.Scope - if expiresIn, err := r.ExpiresIn.Int64(); err != nil { - result.Expires = r.Expires - } else { - result.Expires = int(expiresIn) - } - } - return -} - -type TokenResponse struct { - AccessToken string - TokenType string - Expires int - IDToken string - RefreshToken string // OPTIONAL. - Scope string // OPTIONAL, if identical to the scope requested by the client, otherwise, REQUIRED. - RawBody []byte // In case callers need some other non-standard info from the token response -} - -type AuthCodeRequest struct { - ResponseType string - ClientID string - RedirectURL *url.URL - Scope []string - State string -} - -func ParseAuthCodeRequest(q url.Values) (AuthCodeRequest, error) { - acr := AuthCodeRequest{ - ResponseType: q.Get("response_type"), - ClientID: q.Get("client_id"), - State: q.Get("state"), - Scope: make([]string, 0), - } - - qs := strings.TrimSpace(q.Get("scope")) - if qs != "" { - acr.Scope = strings.Split(qs, " ") - } - - err := func() error { - if acr.ClientID == "" { - return NewError(ErrorInvalidRequest) - } - - redirectURL := q.Get("redirect_uri") - if redirectURL != "" { - ru, err := url.Parse(redirectURL) - if err != nil { - return NewError(ErrorInvalidRequest) - } - acr.RedirectURL = ru - } - - return nil - }() - - return acr, err -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/client.go b/vendor/github.com/coreos/go-oidc/oidc/client.go deleted file mode 100644 index 7a3cb40f..00000000 --- a/vendor/github.com/coreos/go-oidc/oidc/client.go +++ /dev/null @@ -1,846 +0,0 @@ -package oidc - -import ( - "encoding/json" - "errors" - "fmt" - "net/http" - "net/mail" - "net/url" - "sync" - "time" - - phttp "github.com/coreos/go-oidc/http" - "github.com/coreos/go-oidc/jose" - "github.com/coreos/go-oidc/key" - "github.com/coreos/go-oidc/oauth2" -) - -const ( - // amount of time that must pass after the last key sync - // completes before another attempt may begin - keySyncWindow = 5 * time.Second -) - -var ( - DefaultScope = []string{"openid", "email", "profile"} - - supportedAuthMethods = map[string]struct{}{ - oauth2.AuthMethodClientSecretBasic: struct{}{}, - oauth2.AuthMethodClientSecretPost: struct{}{}, - } -) - -type ClientCredentials oauth2.ClientCredentials - -type ClientIdentity struct { - Credentials ClientCredentials - Metadata ClientMetadata -} - -type JWAOptions struct { - // SigningAlg specifies an JWA alg for signing JWTs. - // - // Specifying this field implies different actions depending on the context. It may - // require objects be serialized and signed as a JWT instead of plain JSON, or - // require an existing JWT object use the specified alg. - // - // See: http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata - SigningAlg string - // EncryptionAlg, if provided, specifies that the returned or sent object be stored - // (or nested) within a JWT object and encrypted with the provided JWA alg. - EncryptionAlg string - // EncryptionEnc specifies the JWA enc algorithm to use with EncryptionAlg. If - // EncryptionAlg is provided and EncryptionEnc is omitted, this field defaults - // to A128CBC-HS256. 
- // - // If EncryptionEnc is provided EncryptionAlg must also be specified. - EncryptionEnc string -} - -func (opt JWAOptions) valid() error { - if opt.EncryptionEnc != "" && opt.EncryptionAlg == "" { - return errors.New("encryption encoding provided with no encryption algorithm") - } - return nil -} - -func (opt JWAOptions) defaults() JWAOptions { - if opt.EncryptionAlg != "" && opt.EncryptionEnc == "" { - opt.EncryptionEnc = jose.EncA128CBCHS256 - } - return opt -} - -var ( - // Ensure ClientMetadata satisfies these interfaces. - _ json.Marshaler = &ClientMetadata{} - _ json.Unmarshaler = &ClientMetadata{} -) - -// ClientMetadata holds metadata that the authorization server associates -// with a client identifier. The fields range from human-facing display -// strings such as client name, to items that impact the security of the -// protocol, such as the list of valid redirect URIs. -// -// See http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata -// -// TODO: support language specific claim representations -// http://openid.net/specs/openid-connect-registration-1_0.html#LanguagesAndScripts -type ClientMetadata struct { - RedirectURIs []url.URL // Required - - // A list of OAuth 2.0 "response_type" values that the client wishes to restrict - // itself to. Either "code", "token", or another registered extension. - // - // If omitted, only "code" will be used. - ResponseTypes []string - // A list of OAuth 2.0 grant types the client wishes to restrict itself to. - // The grant type values used by OIDC are "authorization_code", "implicit", - // and "refresh_token". - // - // If ommitted, only "authorization_code" will be used. - GrantTypes []string - // "native" or "web". If omitted, "web". - ApplicationType string - - // List of email addresses. - Contacts []mail.Address - // Name of client to be presented to the end-user. - ClientName string - // URL that references a logo for the Client application. - LogoURI *url.URL - // URL of the home page of the Client. - ClientURI *url.URL - // Profile data policies and terms of use to be provided to the end user. - PolicyURI *url.URL - TermsOfServiceURI *url.URL - - // URL to or the value of the client's JSON Web Key Set document. - JWKSURI *url.URL - JWKS *jose.JWKSet - - // URL referencing a flie with a single JSON array of redirect URIs. - SectorIdentifierURI *url.URL - - SubjectType string - - // Options to restrict the JWS alg and enc values used for server responses and requests. - IDTokenResponseOptions JWAOptions - UserInfoResponseOptions JWAOptions - RequestObjectOptions JWAOptions - - // Client requested authorization method and signing options for the token endpoint. - // - // Defaults to "client_secret_basic" - TokenEndpointAuthMethod string - TokenEndpointAuthSigningAlg string - - // DefaultMaxAge specifies the maximum amount of time in seconds before an authorized - // user must reauthroize. - // - // If 0, no limitation is placed on the maximum. - DefaultMaxAge int64 - // RequireAuthTime specifies if the auth_time claim in the ID token is required. - RequireAuthTime bool - - // Default Authentication Context Class Reference values for authentication requests. - DefaultACRValues []string - - // URI that a third party can use to initiate a login by the relaying party. - // - // See: http://openid.net/specs/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin - InitiateLoginURI *url.URL - // Pre-registered request_uri values that may be cached by the server. 
- RequestURIs []url.URL -} - -// Defaults returns a shallow copy of ClientMetadata with default -// values replacing omitted fields. -func (m ClientMetadata) Defaults() ClientMetadata { - if len(m.ResponseTypes) == 0 { - m.ResponseTypes = []string{oauth2.ResponseTypeCode} - } - if len(m.GrantTypes) == 0 { - m.GrantTypes = []string{oauth2.GrantTypeAuthCode} - } - if m.ApplicationType == "" { - m.ApplicationType = "web" - } - if m.TokenEndpointAuthMethod == "" { - m.TokenEndpointAuthMethod = oauth2.AuthMethodClientSecretBasic - } - m.IDTokenResponseOptions = m.IDTokenResponseOptions.defaults() - m.UserInfoResponseOptions = m.UserInfoResponseOptions.defaults() - m.RequestObjectOptions = m.RequestObjectOptions.defaults() - return m -} - -func (m *ClientMetadata) MarshalJSON() ([]byte, error) { - e := m.toEncodableStruct() - return json.Marshal(&e) -} - -func (m *ClientMetadata) UnmarshalJSON(data []byte) error { - var e encodableClientMetadata - if err := json.Unmarshal(data, &e); err != nil { - return err - } - meta, err := e.toStruct() - if err != nil { - return err - } - if err := meta.Valid(); err != nil { - return err - } - *m = meta - return nil -} - -type encodableClientMetadata struct { - RedirectURIs []string `json:"redirect_uris"` // Required - ResponseTypes []string `json:"response_types,omitempty"` - GrantTypes []string `json:"grant_types,omitempty"` - ApplicationType string `json:"application_type,omitempty"` - Contacts []string `json:"contacts,omitempty"` - ClientName string `json:"client_name,omitempty"` - LogoURI string `json:"logo_uri,omitempty"` - ClientURI string `json:"client_uri,omitempty"` - PolicyURI string `json:"policy_uri,omitempty"` - TermsOfServiceURI string `json:"tos_uri,omitempty"` - JWKSURI string `json:"jwks_uri,omitempty"` - JWKS *jose.JWKSet `json:"jwks,omitempty"` - SectorIdentifierURI string `json:"sector_identifier_uri,omitempty"` - SubjectType string `json:"subject_type,omitempty"` - IDTokenSignedResponseAlg string `json:"id_token_signed_response_alg,omitempty"` - IDTokenEncryptedResponseAlg string `json:"id_token_encrypted_response_alg,omitempty"` - IDTokenEncryptedResponseEnc string `json:"id_token_encrypted_response_enc,omitempty"` - UserInfoSignedResponseAlg string `json:"userinfo_signed_response_alg,omitempty"` - UserInfoEncryptedResponseAlg string `json:"userinfo_encrypted_response_alg,omitempty"` - UserInfoEncryptedResponseEnc string `json:"userinfo_encrypted_response_enc,omitempty"` - RequestObjectSigningAlg string `json:"request_object_signing_alg,omitempty"` - RequestObjectEncryptionAlg string `json:"request_object_encryption_alg,omitempty"` - RequestObjectEncryptionEnc string `json:"request_object_encryption_enc,omitempty"` - TokenEndpointAuthMethod string `json:"token_endpoint_auth_method,omitempty"` - TokenEndpointAuthSigningAlg string `json:"token_endpoint_auth_signing_alg,omitempty"` - DefaultMaxAge int64 `json:"default_max_age,omitempty"` - RequireAuthTime bool `json:"require_auth_time,omitempty"` - DefaultACRValues []string `json:"default_acr_values,omitempty"` - InitiateLoginURI string `json:"initiate_login_uri,omitempty"` - RequestURIs []string `json:"request_uris,omitempty"` -} - -func (c *encodableClientMetadata) toStruct() (ClientMetadata, error) { - p := stickyErrParser{} - m := ClientMetadata{ - RedirectURIs: p.parseURIs(c.RedirectURIs, "redirect_uris"), - ResponseTypes: c.ResponseTypes, - GrantTypes: c.GrantTypes, - ApplicationType: c.ApplicationType, - Contacts: p.parseEmails(c.Contacts, "contacts"), - ClientName: c.ClientName, - 
LogoURI: p.parseURI(c.LogoURI, "logo_uri"), - ClientURI: p.parseURI(c.ClientURI, "client_uri"), - PolicyURI: p.parseURI(c.PolicyURI, "policy_uri"), - TermsOfServiceURI: p.parseURI(c.TermsOfServiceURI, "tos_uri"), - JWKSURI: p.parseURI(c.JWKSURI, "jwks_uri"), - JWKS: c.JWKS, - SectorIdentifierURI: p.parseURI(c.SectorIdentifierURI, "sector_identifier_uri"), - SubjectType: c.SubjectType, - TokenEndpointAuthMethod: c.TokenEndpointAuthMethod, - TokenEndpointAuthSigningAlg: c.TokenEndpointAuthSigningAlg, - DefaultMaxAge: c.DefaultMaxAge, - RequireAuthTime: c.RequireAuthTime, - DefaultACRValues: c.DefaultACRValues, - InitiateLoginURI: p.parseURI(c.InitiateLoginURI, "initiate_login_uri"), - RequestURIs: p.parseURIs(c.RequestURIs, "request_uris"), - IDTokenResponseOptions: JWAOptions{ - c.IDTokenSignedResponseAlg, - c.IDTokenEncryptedResponseAlg, - c.IDTokenEncryptedResponseEnc, - }, - UserInfoResponseOptions: JWAOptions{ - c.UserInfoSignedResponseAlg, - c.UserInfoEncryptedResponseAlg, - c.UserInfoEncryptedResponseEnc, - }, - RequestObjectOptions: JWAOptions{ - c.RequestObjectSigningAlg, - c.RequestObjectEncryptionAlg, - c.RequestObjectEncryptionEnc, - }, - } - if p.firstErr != nil { - return ClientMetadata{}, p.firstErr - } - return m, nil -} - -// stickyErrParser parses URIs and email addresses. Once it encounters -// a parse error, subsequent calls become no-op. -type stickyErrParser struct { - firstErr error -} - -func (p *stickyErrParser) parseURI(s, field string) *url.URL { - if p.firstErr != nil || s == "" { - return nil - } - u, err := url.Parse(s) - if err == nil { - if u.Host == "" { - err = errors.New("no host in URI") - } else if u.Scheme != "http" && u.Scheme != "https" { - err = errors.New("invalid URI scheme") - } - } - if err != nil { - p.firstErr = fmt.Errorf("failed to parse %s: %v", field, err) - return nil - } - return u -} - -func (p *stickyErrParser) parseURIs(s []string, field string) []url.URL { - if p.firstErr != nil || len(s) == 0 { - return nil - } - uris := make([]url.URL, len(s)) - for i, val := range s { - if val == "" { - p.firstErr = fmt.Errorf("invalid URI in field %s", field) - return nil - } - if u := p.parseURI(val, field); u != nil { - uris[i] = *u - } - } - return uris -} - -func (p *stickyErrParser) parseEmails(s []string, field string) []mail.Address { - if p.firstErr != nil || len(s) == 0 { - return nil - } - addrs := make([]mail.Address, len(s)) - for i, addr := range s { - if addr == "" { - p.firstErr = fmt.Errorf("invalid email in field %s", field) - return nil - } - a, err := mail.ParseAddress(addr) - if err != nil { - p.firstErr = fmt.Errorf("invalid email in field %s: %v", field, err) - return nil - } - addrs[i] = *a - } - return addrs -} - -func (m *ClientMetadata) toEncodableStruct() encodableClientMetadata { - return encodableClientMetadata{ - RedirectURIs: urisToStrings(m.RedirectURIs), - ResponseTypes: m.ResponseTypes, - GrantTypes: m.GrantTypes, - ApplicationType: m.ApplicationType, - Contacts: emailsToStrings(m.Contacts), - ClientName: m.ClientName, - LogoURI: uriToString(m.LogoURI), - ClientURI: uriToString(m.ClientURI), - PolicyURI: uriToString(m.PolicyURI), - TermsOfServiceURI: uriToString(m.TermsOfServiceURI), - JWKSURI: uriToString(m.JWKSURI), - JWKS: m.JWKS, - SectorIdentifierURI: uriToString(m.SectorIdentifierURI), - SubjectType: m.SubjectType, - IDTokenSignedResponseAlg: m.IDTokenResponseOptions.SigningAlg, - IDTokenEncryptedResponseAlg: m.IDTokenResponseOptions.EncryptionAlg, - IDTokenEncryptedResponseEnc: 
m.IDTokenResponseOptions.EncryptionEnc, - UserInfoSignedResponseAlg: m.UserInfoResponseOptions.SigningAlg, - UserInfoEncryptedResponseAlg: m.UserInfoResponseOptions.EncryptionAlg, - UserInfoEncryptedResponseEnc: m.UserInfoResponseOptions.EncryptionEnc, - RequestObjectSigningAlg: m.RequestObjectOptions.SigningAlg, - RequestObjectEncryptionAlg: m.RequestObjectOptions.EncryptionAlg, - RequestObjectEncryptionEnc: m.RequestObjectOptions.EncryptionEnc, - TokenEndpointAuthMethod: m.TokenEndpointAuthMethod, - TokenEndpointAuthSigningAlg: m.TokenEndpointAuthSigningAlg, - DefaultMaxAge: m.DefaultMaxAge, - RequireAuthTime: m.RequireAuthTime, - DefaultACRValues: m.DefaultACRValues, - InitiateLoginURI: uriToString(m.InitiateLoginURI), - RequestURIs: urisToStrings(m.RequestURIs), - } -} - -func uriToString(u *url.URL) string { - if u == nil { - return "" - } - return u.String() -} - -func urisToStrings(urls []url.URL) []string { - if len(urls) == 0 { - return nil - } - sli := make([]string, len(urls)) - for i, u := range urls { - sli[i] = u.String() - } - return sli -} - -func emailsToStrings(addrs []mail.Address) []string { - if len(addrs) == 0 { - return nil - } - sli := make([]string, len(addrs)) - for i, addr := range addrs { - sli[i] = addr.String() - } - return sli -} - -// Valid determines if a ClientMetadata conforms with the OIDC specification. -// -// Valid is called by UnmarshalJSON. -// -// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for -// URLs fields where the OIDC spec requires it. This may change in future releases -// of this package. See: https://github.com/coreos/go-oidc/issues/34 -func (m *ClientMetadata) Valid() error { - if len(m.RedirectURIs) == 0 { - return errors.New("zero redirect URLs") - } - - validURI := func(u *url.URL, fieldName string) error { - if u.Host == "" { - return fmt.Errorf("no host for uri field %s", fieldName) - } - if u.Scheme != "http" && u.Scheme != "https" { - return fmt.Errorf("uri field %s scheme is not http or https", fieldName) - } - return nil - } - - uris := []struct { - val *url.URL - name string - }{ - {m.LogoURI, "logo_uri"}, - {m.ClientURI, "client_uri"}, - {m.PolicyURI, "policy_uri"}, - {m.TermsOfServiceURI, "tos_uri"}, - {m.JWKSURI, "jwks_uri"}, - {m.SectorIdentifierURI, "sector_identifier_uri"}, - {m.InitiateLoginURI, "initiate_login_uri"}, - } - - for _, uri := range uris { - if uri.val == nil { - continue - } - if err := validURI(uri.val, uri.name); err != nil { - return err - } - } - - uriLists := []struct { - vals []url.URL - name string - }{ - {m.RedirectURIs, "redirect_uris"}, - {m.RequestURIs, "request_uris"}, - } - for _, list := range uriLists { - for _, uri := range list.vals { - if err := validURI(&uri, list.name); err != nil { - return err - } - } - } - - options := []struct { - option JWAOptions - name string - }{ - {m.IDTokenResponseOptions, "id_token response"}, - {m.UserInfoResponseOptions, "userinfo response"}, - {m.RequestObjectOptions, "request_object"}, - } - for _, option := range options { - if err := option.option.valid(); err != nil { - return fmt.Errorf("invalid JWA values for %s: %v", option.name, err) - } - } - return nil -} - -type ClientRegistrationResponse struct { - ClientID string // Required - ClientSecret string - RegistrationAccessToken string - RegistrationClientURI string - // If IsZero is true, unspecified. - ClientIDIssuedAt time.Time - // Time at which the client_secret will expire. - // If IsZero is true, it will not expire. 
- ClientSecretExpiresAt time.Time - - ClientMetadata -} - -type encodableClientRegistrationResponse struct { - ClientID string `json:"client_id"` // Required - ClientSecret string `json:"client_secret,omitempty"` - RegistrationAccessToken string `json:"registration_access_token,omitempty"` - RegistrationClientURI string `json:"registration_client_uri,omitempty"` - ClientIDIssuedAt int64 `json:"client_id_issued_at,omitempty"` - // Time at which the client_secret will expire, in seconds since the epoch. - // If 0 it will not expire. - ClientSecretExpiresAt int64 `json:"client_secret_expires_at"` // Required - - encodableClientMetadata -} - -func unixToSec(t time.Time) int64 { - if t.IsZero() { - return 0 - } - return t.Unix() -} - -func (c *ClientRegistrationResponse) MarshalJSON() ([]byte, error) { - e := encodableClientRegistrationResponse{ - ClientID: c.ClientID, - ClientSecret: c.ClientSecret, - RegistrationAccessToken: c.RegistrationAccessToken, - RegistrationClientURI: c.RegistrationClientURI, - ClientIDIssuedAt: unixToSec(c.ClientIDIssuedAt), - ClientSecretExpiresAt: unixToSec(c.ClientSecretExpiresAt), - encodableClientMetadata: c.ClientMetadata.toEncodableStruct(), - } - return json.Marshal(&e) -} - -func secToUnix(sec int64) time.Time { - if sec == 0 { - return time.Time{} - } - return time.Unix(sec, 0) -} - -func (c *ClientRegistrationResponse) UnmarshalJSON(data []byte) error { - var e encodableClientRegistrationResponse - if err := json.Unmarshal(data, &e); err != nil { - return err - } - if e.ClientID == "" { - return errors.New("no client_id in client registration response") - } - metadata, err := e.encodableClientMetadata.toStruct() - if err != nil { - return err - } - *c = ClientRegistrationResponse{ - ClientID: e.ClientID, - ClientSecret: e.ClientSecret, - RegistrationAccessToken: e.RegistrationAccessToken, - RegistrationClientURI: e.RegistrationClientURI, - ClientIDIssuedAt: secToUnix(e.ClientIDIssuedAt), - ClientSecretExpiresAt: secToUnix(e.ClientSecretExpiresAt), - ClientMetadata: metadata, - } - return nil -} - -type ClientConfig struct { - HTTPClient phttp.Client - Credentials ClientCredentials - Scope []string - RedirectURL string - ProviderConfig ProviderConfig - KeySet key.PublicKeySet -} - -func NewClient(cfg ClientConfig) (*Client, error) { - // Allow empty redirect URL in the case where the client - // only needs to verify a given token. 
- ru, err := url.Parse(cfg.RedirectURL) - if err != nil { - return nil, fmt.Errorf("invalid redirect URL: %v", err) - } - - c := Client{ - credentials: cfg.Credentials, - httpClient: cfg.HTTPClient, - scope: cfg.Scope, - redirectURL: ru.String(), - providerConfig: newProviderConfigRepo(cfg.ProviderConfig), - keySet: cfg.KeySet, - } - - if c.httpClient == nil { - c.httpClient = http.DefaultClient - } - - if c.scope == nil { - c.scope = make([]string, len(DefaultScope)) - copy(c.scope, DefaultScope) - } - - return &c, nil -} - -type Client struct { - httpClient phttp.Client - providerConfig *providerConfigRepo - credentials ClientCredentials - redirectURL string - scope []string - keySet key.PublicKeySet - providerSyncer *ProviderConfigSyncer - - keySetSyncMutex sync.RWMutex - lastKeySetSync time.Time -} - -func (c *Client) Healthy() error { - now := time.Now().UTC() - - cfg := c.providerConfig.Get() - - if cfg.Empty() { - return errors.New("oidc client provider config empty") - } - - if !cfg.ExpiresAt.IsZero() && cfg.ExpiresAt.Before(now) { - return errors.New("oidc client provider config expired") - } - - return nil -} - -func (c *Client) OAuthClient() (*oauth2.Client, error) { - cfg := c.providerConfig.Get() - authMethod, err := chooseAuthMethod(cfg) - if err != nil { - return nil, err - } - - ocfg := oauth2.Config{ - Credentials: oauth2.ClientCredentials(c.credentials), - RedirectURL: c.redirectURL, - AuthURL: cfg.AuthEndpoint.String(), - TokenURL: cfg.TokenEndpoint.String(), - Scope: c.scope, - AuthMethod: authMethod, - } - - return oauth2.NewClient(c.httpClient, ocfg) -} - -func chooseAuthMethod(cfg ProviderConfig) (string, error) { - if len(cfg.TokenEndpointAuthMethodsSupported) == 0 { - return oauth2.AuthMethodClientSecretBasic, nil - } - - for _, authMethod := range cfg.TokenEndpointAuthMethodsSupported { - if _, ok := supportedAuthMethods[authMethod]; ok { - return authMethod, nil - } - } - - return "", errors.New("no supported auth methods") -} - -// SyncProviderConfig starts the provider config syncer -func (c *Client) SyncProviderConfig(discoveryURL string) chan struct{} { - r := NewHTTPProviderConfigGetter(c.httpClient, discoveryURL) - s := NewProviderConfigSyncer(r, c.providerConfig) - stop := s.Run() - s.WaitUntilInitialSync() - return stop -} - -func (c *Client) maybeSyncKeys() error { - tooSoon := func() bool { - return time.Now().UTC().Before(c.lastKeySetSync.Add(keySyncWindow)) - } - - // ignore request to sync keys if a sync operation has been - // attempted too recently - if tooSoon() { - return nil - } - - c.keySetSyncMutex.Lock() - defer c.keySetSyncMutex.Unlock() - - // check again, as another goroutine may have been holding - // the lock while updating the keys - if tooSoon() { - return nil - } - - cfg := c.providerConfig.Get() - r := NewRemotePublicKeyRepo(c.httpClient, cfg.KeysEndpoint.String()) - w := &clientKeyRepo{client: c} - _, err := key.Sync(r, w) - c.lastKeySetSync = time.Now().UTC() - - return err -} - -type clientKeyRepo struct { - client *Client -} - -func (r *clientKeyRepo) Set(ks key.KeySet) error { - pks, ok := ks.(*key.PublicKeySet) - if !ok { - return errors.New("unable to cast to PublicKey") - } - r.client.keySet = *pks - return nil -} - -func (c *Client) ClientCredsToken(scope []string) (jose.JWT, error) { - cfg := c.providerConfig.Get() - - if !cfg.SupportsGrantType(oauth2.GrantTypeClientCreds) { - return jose.JWT{}, fmt.Errorf("%v grant type is not supported", oauth2.GrantTypeClientCreds) - } - - oac, err := c.OAuthClient() - if err != nil { - 
return jose.JWT{}, err - } - - t, err := oac.ClientCredsToken(scope) - if err != nil { - return jose.JWT{}, err - } - - jwt, err := jose.ParseJWT(t.IDToken) - if err != nil { - return jose.JWT{}, err - } - - return jwt, c.VerifyJWT(jwt) -} - -// ExchangeAuthCode exchanges an OAuth2 auth code for an OIDC JWT ID token. -func (c *Client) ExchangeAuthCode(code string) (jose.JWT, error) { - oac, err := c.OAuthClient() - if err != nil { - return jose.JWT{}, err - } - - t, err := oac.RequestToken(oauth2.GrantTypeAuthCode, code) - if err != nil { - return jose.JWT{}, err - } - - jwt, err := jose.ParseJWT(t.IDToken) - if err != nil { - return jose.JWT{}, err - } - - return jwt, c.VerifyJWT(jwt) -} - -// RefreshToken uses a refresh token to exchange for a new OIDC JWT ID Token. -func (c *Client) RefreshToken(refreshToken string) (jose.JWT, error) { - oac, err := c.OAuthClient() - if err != nil { - return jose.JWT{}, err - } - - t, err := oac.RequestToken(oauth2.GrantTypeRefreshToken, refreshToken) - if err != nil { - return jose.JWT{}, err - } - - jwt, err := jose.ParseJWT(t.IDToken) - if err != nil { - return jose.JWT{}, err - } - - return jwt, c.VerifyJWT(jwt) -} - -func (c *Client) VerifyJWT(jwt jose.JWT) error { - var keysFunc func() []key.PublicKey - if kID, ok := jwt.KeyID(); ok { - keysFunc = c.keysFuncWithID(kID) - } else { - keysFunc = c.keysFuncAll() - } - - v := NewJWTVerifier( - c.providerConfig.Get().Issuer.String(), - c.credentials.ID, - c.maybeSyncKeys, keysFunc) - - return v.Verify(jwt) -} - -// keysFuncWithID returns a function that retrieves at most unexpired -// public key from the Client that matches the provided ID -func (c *Client) keysFuncWithID(kID string) func() []key.PublicKey { - return func() []key.PublicKey { - c.keySetSyncMutex.RLock() - defer c.keySetSyncMutex.RUnlock() - - if c.keySet.ExpiresAt().Before(time.Now()) { - return []key.PublicKey{} - } - - k := c.keySet.Key(kID) - if k == nil { - return []key.PublicKey{} - } - - return []key.PublicKey{*k} - } -} - -// keysFuncAll returns a function that retrieves all unexpired public -// keys from the Client -func (c *Client) keysFuncAll() func() []key.PublicKey { - return func() []key.PublicKey { - c.keySetSyncMutex.RLock() - defer c.keySetSyncMutex.RUnlock() - - if c.keySet.ExpiresAt().Before(time.Now()) { - return []key.PublicKey{} - } - - return c.keySet.Keys() - } -} - -type providerConfigRepo struct { - mu sync.RWMutex - config ProviderConfig // do not access directly, use Get() -} - -func newProviderConfigRepo(pc ProviderConfig) *providerConfigRepo { - return &providerConfigRepo{sync.RWMutex{}, pc} -} - -// returns an error to implement ProviderConfigSetter -func (r *providerConfigRepo) Set(cfg ProviderConfig) error { - r.mu.Lock() - defer r.mu.Unlock() - r.config = cfg - return nil -} - -func (r *providerConfigRepo) Get() ProviderConfig { - r.mu.RLock() - defer r.mu.RUnlock() - return r.config -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/doc.go b/vendor/github.com/coreos/go-oidc/oidc/doc.go deleted file mode 100644 index 196611ec..00000000 --- a/vendor/github.com/coreos/go-oidc/oidc/doc.go +++ /dev/null @@ -1,2 +0,0 @@ -// Package oidc is DEPRECATED. Use github.com/coreos/go-oidc instead. 
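For context on what the client.go deletion drops: the removed oidc.Client bundled provider-config syncing, auth-code exchange, and ID-token verification into one type. A minimal usage sketch, not taken from this repository; the issuer URL, redirect URL, code value, and the ClientCredentials field names (from the companion oauth2 package) are assumptions for illustration:

    package main

    import (
        "fmt"
        "log"

        "github.com/coreos/go-oidc/oidc"
    )

    func main() {
        client, err := oidc.NewClient(oidc.ClientConfig{
            Credentials: oidc.ClientCredentials{ID: "client-id", Secret: "client-secret"},
            RedirectURL: "https://app.example.com/callback",
        })
        if err != nil {
            log.Fatal(err)
        }

        // Keep the provider config in sync with the issuer's discovery document;
        // SyncProviderConfig blocks until the initial sync has completed.
        stop := client.SyncProviderConfig("https://issuer.example.com")
        defer close(stop)

        // Exchange an OAuth2 authorization code for a verified OIDC ID token.
        jwt, err := client.ExchangeAuthCode("code-from-redirect")
        if err != nil {
            log.Fatal(err)
        }

        claims, err := jwt.Claims()
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(claims["sub"])
    }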
-package oidc diff --git a/vendor/github.com/coreos/go-oidc/oidc/identity.go b/vendor/github.com/coreos/go-oidc/oidc/identity.go deleted file mode 100644 index 9bfa8e34..00000000 --- a/vendor/github.com/coreos/go-oidc/oidc/identity.go +++ /dev/null @@ -1,44 +0,0 @@ -package oidc - -import ( - "errors" - "time" - - "github.com/coreos/go-oidc/jose" -) - -type Identity struct { - ID string - Name string - Email string - ExpiresAt time.Time -} - -func IdentityFromClaims(claims jose.Claims) (*Identity, error) { - if claims == nil { - return nil, errors.New("nil claim set") - } - - var ident Identity - var err error - var ok bool - - if ident.ID, ok, err = claims.StringClaim("sub"); err != nil { - return nil, err - } else if !ok { - return nil, errors.New("missing required claim: sub") - } - - if ident.Email, _, err = claims.StringClaim("email"); err != nil { - return nil, err - } - - exp, ok, err := claims.TimeClaim("exp") - if err != nil { - return nil, err - } else if ok { - ident.ExpiresAt = exp - } - - return &ident, nil -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/interface.go b/vendor/github.com/coreos/go-oidc/oidc/interface.go deleted file mode 100644 index 248cac0b..00000000 --- a/vendor/github.com/coreos/go-oidc/oidc/interface.go +++ /dev/null @@ -1,3 +0,0 @@ -package oidc - -type LoginFunc func(ident Identity, sessionKey string) (redirectURL string, err error) diff --git a/vendor/github.com/coreos/go-oidc/oidc/key.go b/vendor/github.com/coreos/go-oidc/oidc/key.go deleted file mode 100755 index 82a0f567..00000000 --- a/vendor/github.com/coreos/go-oidc/oidc/key.go +++ /dev/null @@ -1,67 +0,0 @@ -package oidc - -import ( - "encoding/json" - "errors" - "net/http" - "time" - - phttp "github.com/coreos/go-oidc/http" - "github.com/coreos/go-oidc/jose" - "github.com/coreos/go-oidc/key" -) - -// DefaultPublicKeySetTTL is the default TTL set on the PublicKeySet if no -// Cache-Control header is provided by the JWK Set document endpoint. -const DefaultPublicKeySetTTL = 24 * time.Hour - -// NewRemotePublicKeyRepo is responsible for fetching the JWK Set document. -func NewRemotePublicKeyRepo(hc phttp.Client, ep string) *remotePublicKeyRepo { - return &remotePublicKeyRepo{hc: hc, ep: ep} -} - -type remotePublicKeyRepo struct { - hc phttp.Client - ep string -} - -// Get returns a PublicKeySet fetched from the JWK Set document endpoint. A TTL -// is set on the Key Set to avoid it having to be re-retrieved for every -// encryption event. This TTL is typically controlled by the endpoint returning -// a Cache-Control header, but defaults to 24 hours if no Cache-Control header -// is found. 
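The removed identity.go reduced a verified claim set to a small Identity struct, requiring only the "sub" claim and picking up "email" and "exp" when present. A hedged sketch of how it was called; the claim values are made up:

    package main

    import (
        "fmt"
        "log"

        "github.com/coreos/go-oidc/jose"
        "github.com/coreos/go-oidc/oidc"
    )

    func main() {
        claims := jose.Claims{"sub": "user-1234", "email": "user@example.com"}

        ident, err := oidc.IdentityFromClaims(claims)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("id=%s email=%s\n", ident.ID, ident.Email)
    }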
-func (r *remotePublicKeyRepo) Get() (key.KeySet, error) { - req, err := http.NewRequest("GET", r.ep, nil) - if err != nil { - return nil, err - } - - resp, err := r.hc.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - var d struct { - Keys []jose.JWK `json:"keys"` - } - if err := json.NewDecoder(resp.Body).Decode(&d); err != nil { - return nil, err - } - - if len(d.Keys) == 0 { - return nil, errors.New("zero keys in response") - } - - ttl, ok, err := phttp.Cacheable(resp.Header) - if err != nil { - return nil, err - } - if !ok { - ttl = DefaultPublicKeySetTTL - } - - exp := time.Now().UTC().Add(ttl) - ks := key.NewPublicKeySet(d.Keys, exp) - return ks, nil -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/provider.go b/vendor/github.com/coreos/go-oidc/oidc/provider.go deleted file mode 100644 index 2afc0da3..00000000 --- a/vendor/github.com/coreos/go-oidc/oidc/provider.go +++ /dev/null @@ -1,687 +0,0 @@ -package oidc - -import ( - "encoding/json" - "errors" - "fmt" - "log" - "net/http" - "net/url" - "strings" - "sync" - "time" - - "github.com/coreos/pkg/timeutil" - "github.com/jonboulle/clockwork" - - phttp "github.com/coreos/go-oidc/http" - "github.com/coreos/go-oidc/oauth2" -) - -const ( - // Subject Identifier types defined by the OIDC spec. Specifies if the provider - // should provide the same sub claim value to all clients (public) or a unique - // value for each client (pairwise). - // - // See: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes - SubjectTypePublic = "public" - SubjectTypePairwise = "pairwise" -) - -var ( - // Default values for omitted provider config fields. - // - // Use ProviderConfig's Defaults method to fill a provider config with these values. - DefaultGrantTypesSupported = []string{oauth2.GrantTypeAuthCode, oauth2.GrantTypeImplicit} - DefaultResponseModesSupported = []string{"query", "fragment"} - DefaultTokenEndpointAuthMethodsSupported = []string{oauth2.AuthMethodClientSecretBasic} - DefaultClaimTypesSupported = []string{"normal"} -) - -const ( - MaximumProviderConfigSyncInterval = 24 * time.Hour - MinimumProviderConfigSyncInterval = time.Minute - - discoveryConfigPath = "/.well-known/openid-configuration" -) - -// internally configurable for tests -var minimumProviderConfigSyncInterval = MinimumProviderConfigSyncInterval - -var ( - // Ensure ProviderConfig satisfies these interfaces. - _ json.Marshaler = &ProviderConfig{} - _ json.Unmarshaler = &ProviderConfig{} -) - -// ProviderConfig represents the OpenID Provider Metadata specifying what -// configurations a provider supports. -// -// See: http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata -type ProviderConfig struct { - Issuer *url.URL // Required - AuthEndpoint *url.URL // Required - TokenEndpoint *url.URL // Required if grant types other than "implicit" are supported - UserInfoEndpoint *url.URL - KeysEndpoint *url.URL // Required - RegistrationEndpoint *url.URL - EndSessionEndpoint *url.URL - CheckSessionIFrame *url.URL - - // Servers MAY choose not to advertise some supported scope values even when this - // parameter is used, although those defined in OpenID Core SHOULD be listed, if supported. - ScopesSupported []string - // OAuth2.0 response types supported. - ResponseTypesSupported []string // Required - // OAuth2.0 response modes supported. - // - // If omitted, defaults to DefaultResponseModesSupported. - ResponseModesSupported []string - // OAuth2.0 grant types supported. 
- // - // If omitted, defaults to DefaultGrantTypesSupported. - GrantTypesSupported []string - ACRValuesSupported []string - // SubjectTypesSupported specifies strategies for providing values for the sub claim. - SubjectTypesSupported []string // Required - - // JWA signing and encryption algorith values supported for ID tokens. - IDTokenSigningAlgValues []string // Required - IDTokenEncryptionAlgValues []string - IDTokenEncryptionEncValues []string - - // JWA signing and encryption algorith values supported for user info responses. - UserInfoSigningAlgValues []string - UserInfoEncryptionAlgValues []string - UserInfoEncryptionEncValues []string - - // JWA signing and encryption algorith values supported for request objects. - ReqObjSigningAlgValues []string - ReqObjEncryptionAlgValues []string - ReqObjEncryptionEncValues []string - - TokenEndpointAuthMethodsSupported []string - TokenEndpointAuthSigningAlgValuesSupported []string - DisplayValuesSupported []string - ClaimTypesSupported []string - ClaimsSupported []string - ServiceDocs *url.URL - ClaimsLocalsSupported []string - UILocalsSupported []string - ClaimsParameterSupported bool - RequestParameterSupported bool - RequestURIParamaterSupported bool - RequireRequestURIRegistration bool - - Policy *url.URL - TermsOfService *url.URL - - // Not part of the OpenID Provider Metadata - ExpiresAt time.Time -} - -// Defaults returns a shallow copy of ProviderConfig with default -// values replacing omitted fields. -// -// var cfg oidc.ProviderConfig -// // Fill provider config with default values for omitted fields. -// cfg = cfg.Defaults() -// -func (p ProviderConfig) Defaults() ProviderConfig { - setDefault := func(val *[]string, defaultVal []string) { - if len(*val) == 0 { - *val = defaultVal - } - } - setDefault(&p.GrantTypesSupported, DefaultGrantTypesSupported) - setDefault(&p.ResponseModesSupported, DefaultResponseModesSupported) - setDefault(&p.TokenEndpointAuthMethodsSupported, DefaultTokenEndpointAuthMethodsSupported) - setDefault(&p.ClaimTypesSupported, DefaultClaimTypesSupported) - return p -} - -func (p *ProviderConfig) MarshalJSON() ([]byte, error) { - e := p.toEncodableStruct() - return json.Marshal(&e) -} - -func (p *ProviderConfig) UnmarshalJSON(data []byte) error { - var e encodableProviderConfig - if err := json.Unmarshal(data, &e); err != nil { - return err - } - conf, err := e.toStruct() - if err != nil { - return err - } - if err := conf.Valid(); err != nil { - return err - } - *p = conf - return nil -} - -type encodableProviderConfig struct { - Issuer string `json:"issuer"` - AuthEndpoint string `json:"authorization_endpoint"` - TokenEndpoint string `json:"token_endpoint"` - UserInfoEndpoint string `json:"userinfo_endpoint,omitempty"` - KeysEndpoint string `json:"jwks_uri"` - RegistrationEndpoint string `json:"registration_endpoint,omitempty"` - EndSessionEndpoint string `json:"end_session_endpoint,omitempty"` - CheckSessionIFrame string `json:"check_session_iframe,omitempty"` - - // Use 'omitempty' for all slices as per OIDC spec: - // "Claims that return multiple values are represented as JSON arrays. - // Claims with zero elements MUST be omitted from the response." 
- // http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse - - ScopesSupported []string `json:"scopes_supported,omitempty"` - ResponseTypesSupported []string `json:"response_types_supported,omitempty"` - ResponseModesSupported []string `json:"response_modes_supported,omitempty"` - GrantTypesSupported []string `json:"grant_types_supported,omitempty"` - ACRValuesSupported []string `json:"acr_values_supported,omitempty"` - SubjectTypesSupported []string `json:"subject_types_supported,omitempty"` - - IDTokenSigningAlgValues []string `json:"id_token_signing_alg_values_supported,omitempty"` - IDTokenEncryptionAlgValues []string `json:"id_token_encryption_alg_values_supported,omitempty"` - IDTokenEncryptionEncValues []string `json:"id_token_encryption_enc_values_supported,omitempty"` - UserInfoSigningAlgValues []string `json:"userinfo_signing_alg_values_supported,omitempty"` - UserInfoEncryptionAlgValues []string `json:"userinfo_encryption_alg_values_supported,omitempty"` - UserInfoEncryptionEncValues []string `json:"userinfo_encryption_enc_values_supported,omitempty"` - ReqObjSigningAlgValues []string `json:"request_object_signing_alg_values_supported,omitempty"` - ReqObjEncryptionAlgValues []string `json:"request_object_encryption_alg_values_supported,omitempty"` - ReqObjEncryptionEncValues []string `json:"request_object_encryption_enc_values_supported,omitempty"` - - TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported,omitempty"` - TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported,omitempty"` - - DisplayValuesSupported []string `json:"display_values_supported,omitempty"` - ClaimTypesSupported []string `json:"claim_types_supported,omitempty"` - ClaimsSupported []string `json:"claims_supported,omitempty"` - ServiceDocs string `json:"service_documentation,omitempty"` - ClaimsLocalsSupported []string `json:"claims_locales_supported,omitempty"` - UILocalsSupported []string `json:"ui_locales_supported,omitempty"` - ClaimsParameterSupported bool `json:"claims_parameter_supported,omitempty"` - RequestParameterSupported bool `json:"request_parameter_supported,omitempty"` - RequestURIParamaterSupported bool `json:"request_uri_parameter_supported,omitempty"` - RequireRequestURIRegistration bool `json:"require_request_uri_registration,omitempty"` - - Policy string `json:"op_policy_uri,omitempty"` - TermsOfService string `json:"op_tos_uri,omitempty"` -} - -func (cfg ProviderConfig) toEncodableStruct() encodableProviderConfig { - return encodableProviderConfig{ - Issuer: uriToString(cfg.Issuer), - AuthEndpoint: uriToString(cfg.AuthEndpoint), - TokenEndpoint: uriToString(cfg.TokenEndpoint), - UserInfoEndpoint: uriToString(cfg.UserInfoEndpoint), - KeysEndpoint: uriToString(cfg.KeysEndpoint), - RegistrationEndpoint: uriToString(cfg.RegistrationEndpoint), - EndSessionEndpoint: uriToString(cfg.EndSessionEndpoint), - CheckSessionIFrame: uriToString(cfg.CheckSessionIFrame), - ScopesSupported: cfg.ScopesSupported, - ResponseTypesSupported: cfg.ResponseTypesSupported, - ResponseModesSupported: cfg.ResponseModesSupported, - GrantTypesSupported: cfg.GrantTypesSupported, - ACRValuesSupported: cfg.ACRValuesSupported, - SubjectTypesSupported: cfg.SubjectTypesSupported, - IDTokenSigningAlgValues: cfg.IDTokenSigningAlgValues, - IDTokenEncryptionAlgValues: cfg.IDTokenEncryptionAlgValues, - IDTokenEncryptionEncValues: cfg.IDTokenEncryptionEncValues, - UserInfoSigningAlgValues: 
cfg.UserInfoSigningAlgValues, - UserInfoEncryptionAlgValues: cfg.UserInfoEncryptionAlgValues, - UserInfoEncryptionEncValues: cfg.UserInfoEncryptionEncValues, - ReqObjSigningAlgValues: cfg.ReqObjSigningAlgValues, - ReqObjEncryptionAlgValues: cfg.ReqObjEncryptionAlgValues, - ReqObjEncryptionEncValues: cfg.ReqObjEncryptionEncValues, - TokenEndpointAuthMethodsSupported: cfg.TokenEndpointAuthMethodsSupported, - TokenEndpointAuthSigningAlgValuesSupported: cfg.TokenEndpointAuthSigningAlgValuesSupported, - DisplayValuesSupported: cfg.DisplayValuesSupported, - ClaimTypesSupported: cfg.ClaimTypesSupported, - ClaimsSupported: cfg.ClaimsSupported, - ServiceDocs: uriToString(cfg.ServiceDocs), - ClaimsLocalsSupported: cfg.ClaimsLocalsSupported, - UILocalsSupported: cfg.UILocalsSupported, - ClaimsParameterSupported: cfg.ClaimsParameterSupported, - RequestParameterSupported: cfg.RequestParameterSupported, - RequestURIParamaterSupported: cfg.RequestURIParamaterSupported, - RequireRequestURIRegistration: cfg.RequireRequestURIRegistration, - Policy: uriToString(cfg.Policy), - TermsOfService: uriToString(cfg.TermsOfService), - } -} - -func (e encodableProviderConfig) toStruct() (ProviderConfig, error) { - p := stickyErrParser{} - conf := ProviderConfig{ - Issuer: p.parseURI(e.Issuer, "issuer"), - AuthEndpoint: p.parseURI(e.AuthEndpoint, "authorization_endpoint"), - TokenEndpoint: p.parseURI(e.TokenEndpoint, "token_endpoint"), - UserInfoEndpoint: p.parseURI(e.UserInfoEndpoint, "userinfo_endpoint"), - KeysEndpoint: p.parseURI(e.KeysEndpoint, "jwks_uri"), - RegistrationEndpoint: p.parseURI(e.RegistrationEndpoint, "registration_endpoint"), - EndSessionEndpoint: p.parseURI(e.EndSessionEndpoint, "end_session_endpoint"), - CheckSessionIFrame: p.parseURI(e.CheckSessionIFrame, "check_session_iframe"), - ScopesSupported: e.ScopesSupported, - ResponseTypesSupported: e.ResponseTypesSupported, - ResponseModesSupported: e.ResponseModesSupported, - GrantTypesSupported: e.GrantTypesSupported, - ACRValuesSupported: e.ACRValuesSupported, - SubjectTypesSupported: e.SubjectTypesSupported, - IDTokenSigningAlgValues: e.IDTokenSigningAlgValues, - IDTokenEncryptionAlgValues: e.IDTokenEncryptionAlgValues, - IDTokenEncryptionEncValues: e.IDTokenEncryptionEncValues, - UserInfoSigningAlgValues: e.UserInfoSigningAlgValues, - UserInfoEncryptionAlgValues: e.UserInfoEncryptionAlgValues, - UserInfoEncryptionEncValues: e.UserInfoEncryptionEncValues, - ReqObjSigningAlgValues: e.ReqObjSigningAlgValues, - ReqObjEncryptionAlgValues: e.ReqObjEncryptionAlgValues, - ReqObjEncryptionEncValues: e.ReqObjEncryptionEncValues, - TokenEndpointAuthMethodsSupported: e.TokenEndpointAuthMethodsSupported, - TokenEndpointAuthSigningAlgValuesSupported: e.TokenEndpointAuthSigningAlgValuesSupported, - DisplayValuesSupported: e.DisplayValuesSupported, - ClaimTypesSupported: e.ClaimTypesSupported, - ClaimsSupported: e.ClaimsSupported, - ServiceDocs: p.parseURI(e.ServiceDocs, "service_documentation"), - ClaimsLocalsSupported: e.ClaimsLocalsSupported, - UILocalsSupported: e.UILocalsSupported, - ClaimsParameterSupported: e.ClaimsParameterSupported, - RequestParameterSupported: e.RequestParameterSupported, - RequestURIParamaterSupported: e.RequestURIParamaterSupported, - RequireRequestURIRegistration: e.RequireRequestURIRegistration, - Policy: p.parseURI(e.Policy, "op_policy-uri"), - TermsOfService: p.parseURI(e.TermsOfService, "op_tos_uri"), - } - if p.firstErr != nil { - return ProviderConfig{}, p.firstErr - } - return conf, nil -} - -// Empty returns if a 
ProviderConfig holds no information. -// -// This case generally indicates a ProviderConfigGetter has experienced an error -// and has nothing to report. -func (p ProviderConfig) Empty() bool { - return p.Issuer == nil -} - -func contains(sli []string, ele string) bool { - for _, s := range sli { - if s == ele { - return true - } - } - return false -} - -// Valid determines if a ProviderConfig conforms with the OIDC specification. -// If Valid returns successfully it guarantees required field are non-nil and -// URLs are well formed. -// -// Valid is called by UnmarshalJSON. -// -// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for -// URLs fields where the OIDC spec requires it. This may change in future releases -// of this package. See: https://github.com/coreos/go-oidc/issues/34 -func (p ProviderConfig) Valid() error { - grantTypes := p.GrantTypesSupported - if len(grantTypes) == 0 { - grantTypes = DefaultGrantTypesSupported - } - implicitOnly := true - for _, grantType := range grantTypes { - if grantType != oauth2.GrantTypeImplicit { - implicitOnly = false - break - } - } - - if len(p.SubjectTypesSupported) == 0 { - return errors.New("missing required field subject_types_supported") - } - if len(p.IDTokenSigningAlgValues) == 0 { - return errors.New("missing required field id_token_signing_alg_values_supported") - } - - if len(p.ScopesSupported) != 0 && !contains(p.ScopesSupported, "openid") { - return errors.New("scoped_supported must be unspecified or include 'openid'") - } - - if !contains(p.IDTokenSigningAlgValues, "RS256") { - return errors.New("id_token_signing_alg_values_supported must include 'RS256'") - } - - uris := []struct { - val *url.URL - name string - required bool - }{ - {p.Issuer, "issuer", true}, - {p.AuthEndpoint, "authorization_endpoint", true}, - {p.TokenEndpoint, "token_endpoint", !implicitOnly}, - {p.UserInfoEndpoint, "userinfo_endpoint", false}, - {p.KeysEndpoint, "jwks_uri", true}, - {p.RegistrationEndpoint, "registration_endpoint", false}, - {p.EndSessionEndpoint, "end_session_endpoint", false}, - {p.CheckSessionIFrame, "check_session_iframe", false}, - {p.ServiceDocs, "service_documentation", false}, - {p.Policy, "op_policy_uri", false}, - {p.TermsOfService, "op_tos_uri", false}, - } - - for _, uri := range uris { - if uri.val == nil { - if !uri.required { - continue - } - return fmt.Errorf("empty value for required uri field %s", uri.name) - } - if uri.val.Host == "" { - return fmt.Errorf("no host for uri field %s", uri.name) - } - if uri.val.Scheme != "http" && uri.val.Scheme != "https" { - return fmt.Errorf("uri field %s schemeis not http or https", uri.name) - } - } - return nil -} - -// Supports determines if provider supports a client given their respective metadata. -func (p ProviderConfig) Supports(c ClientMetadata) error { - if err := p.Valid(); err != nil { - return fmt.Errorf("invalid provider config: %v", err) - } - if err := c.Valid(); err != nil { - return fmt.Errorf("invalid client config: %v", err) - } - - // Fill default values for omitted fields - c = c.Defaults() - p = p.Defaults() - - // Do the supported values list the requested one? 
- supports := []struct { - supported []string - requested string - name string - }{ - {p.IDTokenSigningAlgValues, c.IDTokenResponseOptions.SigningAlg, "id_token_signed_response_alg"}, - {p.IDTokenEncryptionAlgValues, c.IDTokenResponseOptions.EncryptionAlg, "id_token_encryption_response_alg"}, - {p.IDTokenEncryptionEncValues, c.IDTokenResponseOptions.EncryptionEnc, "id_token_encryption_response_enc"}, - {p.UserInfoSigningAlgValues, c.UserInfoResponseOptions.SigningAlg, "userinfo_signed_response_alg"}, - {p.UserInfoEncryptionAlgValues, c.UserInfoResponseOptions.EncryptionAlg, "userinfo_encryption_response_alg"}, - {p.UserInfoEncryptionEncValues, c.UserInfoResponseOptions.EncryptionEnc, "userinfo_encryption_response_enc"}, - {p.ReqObjSigningAlgValues, c.RequestObjectOptions.SigningAlg, "request_object_signing_alg"}, - {p.ReqObjEncryptionAlgValues, c.RequestObjectOptions.EncryptionAlg, "request_object_encryption_alg"}, - {p.ReqObjEncryptionEncValues, c.RequestObjectOptions.EncryptionEnc, "request_object_encryption_enc"}, - } - for _, field := range supports { - if field.requested == "" { - continue - } - if !contains(field.supported, field.requested) { - return fmt.Errorf("provider does not support requested value for field %s", field.name) - } - } - - stringsEqual := func(s1, s2 string) bool { return s1 == s2 } - - // For lists, are the list of requested values a subset of the supported ones? - supportsAll := []struct { - supported []string - requested []string - name string - // OAuth2.0 response_type can be space separated lists where order doesn't matter. - // For example "id_token token" is the same as "token id_token" - // Support a custom compare method. - comp func(s1, s2 string) bool - }{ - {p.GrantTypesSupported, c.GrantTypes, "grant_types", stringsEqual}, - {p.ResponseTypesSupported, c.ResponseTypes, "response_type", oauth2.ResponseTypesEqual}, - } - for _, field := range supportsAll { - requestLoop: - for _, req := range field.requested { - for _, sup := range field.supported { - if field.comp(req, sup) { - continue requestLoop - } - } - return fmt.Errorf("provider does not support requested value for field %s", field.name) - } - } - - // TODO(ericchiang): Are there more checks we feel comfortable with begin strict about? 
- - return nil -} - -func (p ProviderConfig) SupportsGrantType(grantType string) bool { - var supported []string - if len(p.GrantTypesSupported) == 0 { - supported = DefaultGrantTypesSupported - } else { - supported = p.GrantTypesSupported - } - - for _, t := range supported { - if t == grantType { - return true - } - } - return false -} - -type ProviderConfigGetter interface { - Get() (ProviderConfig, error) -} - -type ProviderConfigSetter interface { - Set(ProviderConfig) error -} - -type ProviderConfigSyncer struct { - from ProviderConfigGetter - to ProviderConfigSetter - clock clockwork.Clock - - initialSyncDone bool - initialSyncWait sync.WaitGroup -} - -func NewProviderConfigSyncer(from ProviderConfigGetter, to ProviderConfigSetter) *ProviderConfigSyncer { - return &ProviderConfigSyncer{ - from: from, - to: to, - clock: clockwork.NewRealClock(), - } -} - -func (s *ProviderConfigSyncer) Run() chan struct{} { - stop := make(chan struct{}) - - var next pcsStepper - next = &pcsStepNext{aft: time.Duration(0)} - - s.initialSyncWait.Add(1) - go func() { - for { - select { - case <-s.clock.After(next.after()): - next = next.step(s.sync) - case <-stop: - return - } - } - }() - - return stop -} - -func (s *ProviderConfigSyncer) WaitUntilInitialSync() { - s.initialSyncWait.Wait() -} - -func (s *ProviderConfigSyncer) sync() (time.Duration, error) { - cfg, err := s.from.Get() - if err != nil { - return 0, err - } - - if err = s.to.Set(cfg); err != nil { - return 0, fmt.Errorf("error setting provider config: %v", err) - } - - if !s.initialSyncDone { - s.initialSyncWait.Done() - s.initialSyncDone = true - } - - return nextSyncAfter(cfg.ExpiresAt, s.clock), nil -} - -type pcsStepFunc func() (time.Duration, error) - -type pcsStepper interface { - after() time.Duration - step(pcsStepFunc) pcsStepper -} - -type pcsStepNext struct { - aft time.Duration -} - -func (n *pcsStepNext) after() time.Duration { - return n.aft -} - -func (n *pcsStepNext) step(fn pcsStepFunc) (next pcsStepper) { - ttl, err := fn() - if err == nil { - next = &pcsStepNext{aft: ttl} - } else { - next = &pcsStepRetry{aft: time.Second} - log.Printf("go-oidc: provider config sync failed, retrying in %v: %v", next.after(), err) - } - return -} - -type pcsStepRetry struct { - aft time.Duration -} - -func (r *pcsStepRetry) after() time.Duration { - return r.aft -} - -func (r *pcsStepRetry) step(fn pcsStepFunc) (next pcsStepper) { - ttl, err := fn() - if err == nil { - next = &pcsStepNext{aft: ttl} - } else { - next = &pcsStepRetry{aft: timeutil.ExpBackoff(r.aft, time.Minute)} - log.Printf("go-oidc: provider config sync failed, retrying in %v: %v", next.after(), err) - } - return -} - -func nextSyncAfter(exp time.Time, clock clockwork.Clock) time.Duration { - if exp.IsZero() { - return MaximumProviderConfigSyncInterval - } - - t := exp.Sub(clock.Now()) / 2 - if t > MaximumProviderConfigSyncInterval { - t = MaximumProviderConfigSyncInterval - } else if t < minimumProviderConfigSyncInterval { - t = minimumProviderConfigSyncInterval - } - - return t -} - -type httpProviderConfigGetter struct { - hc phttp.Client - issuerURL string - clock clockwork.Clock -} - -func NewHTTPProviderConfigGetter(hc phttp.Client, issuerURL string) *httpProviderConfigGetter { - return &httpProviderConfigGetter{ - hc: hc, - issuerURL: issuerURL, - clock: clockwork.NewRealClock(), - } -} - -func (r *httpProviderConfigGetter) Get() (cfg ProviderConfig, err error) { - // If the Issuer value contains a path component, any terminating / MUST be removed before - // 
appending /.well-known/openid-configuration. - // https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationRequest - discoveryURL := strings.TrimSuffix(r.issuerURL, "/") + discoveryConfigPath - req, err := http.NewRequest("GET", discoveryURL, nil) - if err != nil { - return - } - - resp, err := r.hc.Do(req) - if err != nil { - return - } - defer resp.Body.Close() - - if err = json.NewDecoder(resp.Body).Decode(&cfg); err != nil { - return - } - - var ttl time.Duration - var ok bool - ttl, ok, err = phttp.Cacheable(resp.Header) - if err != nil { - return - } else if ok { - cfg.ExpiresAt = r.clock.Now().UTC().Add(ttl) - } - - // The issuer value returned MUST be identical to the Issuer URL that was directly used to retrieve the configuration information. - // http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationValidation - if !urlEqual(cfg.Issuer.String(), r.issuerURL) { - err = fmt.Errorf(`"issuer" in config (%v) does not match provided issuer URL (%v)`, cfg.Issuer, r.issuerURL) - return - } - - return -} - -func FetchProviderConfig(hc phttp.Client, issuerURL string) (ProviderConfig, error) { - if hc == nil { - hc = http.DefaultClient - } - - g := NewHTTPProviderConfigGetter(hc, issuerURL) - return g.Get() -} - -func WaitForProviderConfig(hc phttp.Client, issuerURL string) (pcfg ProviderConfig) { - return waitForProviderConfig(hc, issuerURL, clockwork.NewRealClock()) -} - -func waitForProviderConfig(hc phttp.Client, issuerURL string, clock clockwork.Clock) (pcfg ProviderConfig) { - var sleep time.Duration - var err error - for { - pcfg, err = FetchProviderConfig(hc, issuerURL) - if err == nil { - break - } - - sleep = timeutil.ExpBackoff(sleep, time.Minute) - fmt.Printf("Failed fetching provider config, trying again in %v: %v\n", sleep, err) - time.Sleep(sleep) - } - - return -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/transport.go b/vendor/github.com/coreos/go-oidc/oidc/transport.go deleted file mode 100644 index 61c926d7..00000000 --- a/vendor/github.com/coreos/go-oidc/oidc/transport.go +++ /dev/null @@ -1,88 +0,0 @@ -package oidc - -import ( - "fmt" - "net/http" - "sync" - - phttp "github.com/coreos/go-oidc/http" - "github.com/coreos/go-oidc/jose" -) - -type TokenRefresher interface { - // Verify checks if the provided token is currently valid or not. - Verify(jose.JWT) error - - // Refresh attempts to authenticate and retrieve a new token. 
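The removed provider.go implemented OIDC discovery: it appended /.well-known/openid-configuration to the issuer URL, validated the returned metadata, used Cache-Control to set an expiry, and re-synced on a half-TTL schedule with exponential backoff on failure. A one-shot fetch looked roughly like this; the issuer URL is a placeholder:

    package main

    import (
        "fmt"
        "log"

        "github.com/coreos/go-oidc/oidc"
    )

    func main() {
        // A nil client falls back to http.DefaultClient; the discovery document
        // is read from <issuer>/.well-known/openid-configuration.
        cfg, err := oidc.FetchProviderConfig(nil, "https://issuer.example.com")
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println(cfg.AuthEndpoint, cfg.TokenEndpoint, cfg.KeysEndpoint)
    }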
- Refresh() (jose.JWT, error) -} - -type ClientCredsTokenRefresher struct { - Issuer string - OIDCClient *Client -} - -func (c *ClientCredsTokenRefresher) Verify(jwt jose.JWT) (err error) { - _, err = VerifyClientClaims(jwt, c.Issuer) - return -} - -func (c *ClientCredsTokenRefresher) Refresh() (jwt jose.JWT, err error) { - if err = c.OIDCClient.Healthy(); err != nil { - err = fmt.Errorf("unable to authenticate, unhealthy OIDC client: %v", err) - return - } - - jwt, err = c.OIDCClient.ClientCredsToken([]string{"openid"}) - if err != nil { - err = fmt.Errorf("unable to verify auth code with issuer: %v", err) - return - } - - return -} - -type AuthenticatedTransport struct { - TokenRefresher - http.RoundTripper - - mu sync.Mutex - jwt jose.JWT -} - -func (t *AuthenticatedTransport) verifiedJWT() (jose.JWT, error) { - t.mu.Lock() - defer t.mu.Unlock() - - if t.TokenRefresher.Verify(t.jwt) == nil { - return t.jwt, nil - } - - jwt, err := t.TokenRefresher.Refresh() - if err != nil { - return jose.JWT{}, fmt.Errorf("unable to acquire valid JWT: %v", err) - } - - t.jwt = jwt - return t.jwt, nil -} - -// SetJWT sets the JWT held by the Transport. -// This is useful for cases in which you want to set an initial JWT. -func (t *AuthenticatedTransport) SetJWT(jwt jose.JWT) { - t.mu.Lock() - defer t.mu.Unlock() - - t.jwt = jwt -} - -func (t *AuthenticatedTransport) RoundTrip(r *http.Request) (*http.Response, error) { - jwt, err := t.verifiedJWT() - if err != nil { - return nil, err - } - - req := phttp.CopyRequest(r) - req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", jwt.Encode())) - return t.RoundTripper.RoundTrip(req) -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/util.go b/vendor/github.com/coreos/go-oidc/oidc/util.go deleted file mode 100644 index f2a5a195..00000000 --- a/vendor/github.com/coreos/go-oidc/oidc/util.go +++ /dev/null @@ -1,109 +0,0 @@ -package oidc - -import ( - "crypto/rand" - "encoding/base64" - "errors" - "fmt" - "net" - "net/http" - "net/url" - "strings" - "time" - - "github.com/coreos/go-oidc/jose" -) - -// RequestTokenExtractor funcs extract a raw encoded token from a request. -type RequestTokenExtractor func(r *http.Request) (string, error) - -// ExtractBearerToken is a RequestTokenExtractor which extracts a bearer token from a request's -// Authorization header. -func ExtractBearerToken(r *http.Request) (string, error) { - ah := r.Header.Get("Authorization") - if ah == "" { - return "", errors.New("missing Authorization header") - } - - if len(ah) <= 6 || strings.ToUpper(ah[0:6]) != "BEARER" { - return "", errors.New("should be a bearer token") - } - - val := ah[7:] - if len(val) == 0 { - return "", errors.New("bearer token is empty") - } - - return val, nil -} - -// CookieTokenExtractor returns a RequestTokenExtractor which extracts a token from the named cookie in a request. 
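The removed transport.go provided a RoundTripper that attached a bearer JWT to every outgoing request and refreshed it through a TokenRefresher whenever verification failed. A sketch assuming an already-constructed *oidc.Client; the helper name below is ours, not from the removed code:

    package example

    import (
        "net/http"

        "github.com/coreos/go-oidc/oidc"
    )

    // newAuthedHTTPClient wraps an existing *oidc.Client so every request
    // carries a JWT obtained via the client-credentials grant, refreshed
    // automatically when the current token no longer verifies.
    func newAuthedHTTPClient(issuer string, oidcClient *oidc.Client) *http.Client {
        return &http.Client{
            Transport: &oidc.AuthenticatedTransport{
                TokenRefresher: &oidc.ClientCredsTokenRefresher{
                    Issuer:     issuer,
                    OIDCClient: oidcClient,
                },
                RoundTripper: http.DefaultTransport,
            },
        }
    }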
-func CookieTokenExtractor(cookieName string) RequestTokenExtractor { - return func(r *http.Request) (string, error) { - ck, err := r.Cookie(cookieName) - if err != nil { - return "", fmt.Errorf("token cookie not found in request: %v", err) - } - - if ck.Value == "" { - return "", errors.New("token cookie found but is empty") - } - - return ck.Value, nil - } -} - -func NewClaims(iss, sub string, aud interface{}, iat, exp time.Time) jose.Claims { - return jose.Claims{ - // required - "iss": iss, - "sub": sub, - "aud": aud, - "iat": iat.Unix(), - "exp": exp.Unix(), - } -} - -func GenClientID(hostport string) (string, error) { - b, err := randBytes(32) - if err != nil { - return "", err - } - - var host string - if strings.Contains(hostport, ":") { - host, _, err = net.SplitHostPort(hostport) - if err != nil { - return "", err - } - } else { - host = hostport - } - - return fmt.Sprintf("%s@%s", base64.URLEncoding.EncodeToString(b), host), nil -} - -func randBytes(n int) ([]byte, error) { - b := make([]byte, n) - got, err := rand.Read(b) - if err != nil { - return nil, err - } else if n != got { - return nil, errors.New("unable to generate enough random data") - } - return b, nil -} - -// urlEqual checks two urls for equality using only the host and path portions. -func urlEqual(url1, url2 string) bool { - u1, err := url.Parse(url1) - if err != nil { - return false - } - u2, err := url.Parse(url2) - if err != nil { - return false - } - - return strings.ToLower(u1.Host+u1.Path) == strings.ToLower(u2.Host+u2.Path) -} diff --git a/vendor/github.com/coreos/go-oidc/oidc/verification.go b/vendor/github.com/coreos/go-oidc/oidc/verification.go deleted file mode 100644 index d9c6afa6..00000000 --- a/vendor/github.com/coreos/go-oidc/oidc/verification.go +++ /dev/null @@ -1,190 +0,0 @@ -package oidc - -import ( - "errors" - "fmt" - "time" - - "github.com/jonboulle/clockwork" - - "github.com/coreos/go-oidc/jose" - "github.com/coreos/go-oidc/key" -) - -func VerifySignature(jwt jose.JWT, keys []key.PublicKey) (bool, error) { - jwtBytes := []byte(jwt.Data()) - for _, k := range keys { - v, err := k.Verifier() - if err != nil { - return false, err - } - if v.Verify(jwt.Signature, jwtBytes) == nil { - return true, nil - } - } - return false, nil -} - -// containsString returns true if the given string(needle) is found -// in the string array(haystack). -func containsString(needle string, haystack []string) bool { - for _, v := range haystack { - if v == needle { - return true - } - } - return false -} - -// Verify claims in accordance with OIDC spec -// http://openid.net/specs/openid-connect-basic-1_0.html#IDTokenValidation -func VerifyClaims(jwt jose.JWT, issuer, clientID string) error { - now := time.Now().UTC() - - claims, err := jwt.Claims() - if err != nil { - return err - } - - ident, err := IdentityFromClaims(claims) - if err != nil { - return err - } - - if ident.ExpiresAt.Before(now) { - return errors.New("token is expired") - } - - // iss REQUIRED. Issuer Identifier for the Issuer of the response. - // The iss value is a case sensitive URL using the https scheme that contains scheme, - // host, and optionally, port number and path components and no query or fragment components. - if iss, exists := claims["iss"].(string); exists { - if !urlEqual(iss, issuer) { - return fmt.Errorf("invalid claim value: 'iss'. expected=%s, found=%s.", issuer, iss) - } - } else { - return errors.New("missing claim: 'iss'") - } - - // iat REQUIRED. Time at which the JWT was issued. 
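The removed util.go carried small request helpers such as ExtractBearerToken and CookieTokenExtractor. A sketch of the bearer-token path; the handler shape and names are ours, and the parsed token still needs signature and claim verification before it can be trusted:

    package example

    import (
        "net/http"

        "github.com/coreos/go-oidc/jose"
        "github.com/coreos/go-oidc/oidc"
    )

    // tokenInfo pulls the raw bearer token off the Authorization header and
    // parses it as a JWT; verification is left to the caller.
    func tokenInfo(w http.ResponseWriter, r *http.Request) {
        raw, err := oidc.ExtractBearerToken(r)
        if err != nil {
            http.Error(w, "missing or malformed bearer token", http.StatusUnauthorized)
            return
        }
        jwt, err := jose.ParseJWT(raw)
        if err != nil {
            http.Error(w, "invalid JWT", http.StatusUnauthorized)
            return
        }
        claims, err := jwt.Claims()
        if err != nil {
            http.Error(w, "invalid JWT claims", http.StatusBadRequest)
            return
        }
        _ = claims // verify signature and claims before trusting these
        w.WriteHeader(http.StatusNoContent)
    }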
- // Its value is a JSON number representing the number of seconds from 1970-01-01T0:0:0Z - // as measured in UTC until the date/time. - if _, exists := claims["iat"].(float64); !exists { - return errors.New("missing claim: 'iat'") - } - - // aud REQUIRED. Audience(s) that this ID Token is intended for. - // It MUST contain the OAuth 2.0 client_id of the Relying Party as an audience value. - // It MAY also contain identifiers for other audiences. In the general case, the aud - // value is an array of case sensitive strings. In the common special case when there - // is one audience, the aud value MAY be a single case sensitive string. - if aud, ok, err := claims.StringClaim("aud"); err == nil && ok { - if aud != clientID { - return fmt.Errorf("invalid claims, 'aud' claim and 'client_id' do not match, aud=%s, client_id=%s", aud, clientID) - } - } else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok { - if !containsString(clientID, aud) { - return fmt.Errorf("invalid claims, cannot find 'client_id' in 'aud' claim, aud=%v, client_id=%s", aud, clientID) - } - } else { - return errors.New("invalid claim value: 'aud' is required, and should be either string or string array") - } - - return nil -} - -// VerifyClientClaims verifies all the required claims are valid for a "client credentials" JWT. -// Returns the client ID if valid, or an error if invalid. -func VerifyClientClaims(jwt jose.JWT, issuer string) (string, error) { - claims, err := jwt.Claims() - if err != nil { - return "", fmt.Errorf("failed to parse JWT claims: %v", err) - } - - iss, ok, err := claims.StringClaim("iss") - if err != nil { - return "", fmt.Errorf("failed to parse 'iss' claim: %v", err) - } else if !ok { - return "", errors.New("missing required 'iss' claim") - } else if !urlEqual(iss, issuer) { - return "", fmt.Errorf("'iss' claim does not match expected issuer, iss=%s", iss) - } - - sub, ok, err := claims.StringClaim("sub") - if err != nil { - return "", fmt.Errorf("failed to parse 'sub' claim: %v", err) - } else if !ok { - return "", errors.New("missing required 'sub' claim") - } - - if aud, ok, err := claims.StringClaim("aud"); err == nil && ok { - if aud != sub { - return "", fmt.Errorf("invalid claims, 'aud' claim and 'sub' claim do not match, aud=%s, sub=%s", aud, sub) - } - } else if aud, ok, err := claims.StringsClaim("aud"); err == nil && ok { - if !containsString(sub, aud) { - return "", fmt.Errorf("invalid claims, cannot find 'sud' in 'aud' claim, aud=%v, sub=%s", aud, sub) - } - } else { - return "", errors.New("invalid claim value: 'aud' is required, and should be either string or string array") - } - - now := time.Now().UTC() - exp, ok, err := claims.TimeClaim("exp") - if err != nil { - return "", fmt.Errorf("failed to parse 'exp' claim: %v", err) - } else if !ok { - return "", errors.New("missing required 'exp' claim") - } else if exp.Before(now) { - return "", fmt.Errorf("token already expired at: %v", exp) - } - - return sub, nil -} - -type JWTVerifier struct { - issuer string - clientID string - syncFunc func() error - keysFunc func() []key.PublicKey - clock clockwork.Clock -} - -func NewJWTVerifier(issuer, clientID string, syncFunc func() error, keysFunc func() []key.PublicKey) JWTVerifier { - return JWTVerifier{ - issuer: issuer, - clientID: clientID, - syncFunc: syncFunc, - keysFunc: keysFunc, - clock: clockwork.NewRealClock(), - } -} - -func (v *JWTVerifier) Verify(jwt jose.JWT) error { - // Verify claims before verifying the signature. 
This is an optimization to throw out - // tokens we know are invalid without undergoing an expensive signature check and - // possibly a re-sync event. - if err := VerifyClaims(jwt, v.issuer, v.clientID); err != nil { - return fmt.Errorf("oidc: JWT claims invalid: %v", err) - } - - ok, err := VerifySignature(jwt, v.keysFunc()) - if err != nil { - return fmt.Errorf("oidc: JWT signature verification failed: %v", err) - } else if ok { - return nil - } - - if err = v.syncFunc(); err != nil { - return fmt.Errorf("oidc: failed syncing KeySet: %v", err) - } - - ok, err = VerifySignature(jwt, v.keysFunc()) - if err != nil { - return fmt.Errorf("oidc: JWT signature verification failed: %v", err) - } else if !ok { - return errors.New("oidc: unable to verify JWT signature: no matching keys") - } - - return nil -} diff --git a/vendor/github.com/coreos/pkg/LICENSE b/vendor/github.com/coreos/pkg/LICENSE deleted file mode 100644 index e06d2081..00000000 --- a/vendor/github.com/coreos/pkg/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. 
The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - diff --git a/vendor/github.com/coreos/pkg/NOTICE b/vendor/github.com/coreos/pkg/NOTICE deleted file mode 100644 index b39ddfa5..00000000 --- a/vendor/github.com/coreos/pkg/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -CoreOS Project -Copyright 2014 CoreOS, Inc - -This product includes software developed at CoreOS, Inc. -(http://www.coreos.com/). diff --git a/vendor/github.com/coreos/pkg/health/README.md b/vendor/github.com/coreos/pkg/health/README.md deleted file mode 100644 index 5ec34c21..00000000 --- a/vendor/github.com/coreos/pkg/health/README.md +++ /dev/null @@ -1,11 +0,0 @@ -health -==== - -A simple framework for implementing an HTTP health check endpoint on servers. - -Users implement their `health.Checkable` types, and create a `health.Checker`, from which they can get an `http.HandlerFunc` using `health.Checker.MakeHealthHandlerFunc`. - -### Documentation - -For more details, visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/health) - diff --git a/vendor/github.com/coreos/pkg/health/health.go b/vendor/github.com/coreos/pkg/health/health.go deleted file mode 100644 index a1c3610f..00000000 --- a/vendor/github.com/coreos/pkg/health/health.go +++ /dev/null @@ -1,127 +0,0 @@ -package health - -import ( - "expvar" - "fmt" - "log" - "net/http" - - "github.com/coreos/pkg/httputil" -) - -// Checkables should return nil when the thing they are checking is healthy, and an error otherwise. -type Checkable interface { - Healthy() error -} - -// Checker provides a way to make an endpoint which can be probed for system health. -type Checker struct { - // Checks are the Checkables to be checked when probing. - Checks []Checkable - - // Unhealthyhandler is called when one or more of the checks are unhealthy. - // If not provided DefaultUnhealthyHandler is called. - UnhealthyHandler UnhealthyHandler - - // HealthyHandler is called when all checks are healthy. - // If not provided, DefaultHealthyHandler is called. 
- HealthyHandler http.HandlerFunc -} - -func (c Checker) ServeHTTP(w http.ResponseWriter, r *http.Request) { - unhealthyHandler := c.UnhealthyHandler - if unhealthyHandler == nil { - unhealthyHandler = DefaultUnhealthyHandler - } - - successHandler := c.HealthyHandler - if successHandler == nil { - successHandler = DefaultHealthyHandler - } - - if r.Method != "GET" { - w.Header().Set("Allow", "GET") - w.WriteHeader(http.StatusMethodNotAllowed) - return - } - - if err := Check(c.Checks); err != nil { - unhealthyHandler(w, r, err) - return - } - - successHandler(w, r) -} - -type UnhealthyHandler func(w http.ResponseWriter, r *http.Request, err error) - -type StatusResponse struct { - Status string `json:"status"` - Details *StatusResponseDetails `json:"details,omitempty"` -} - -type StatusResponseDetails struct { - Code int `json:"code,omitempty"` - Message string `json:"message,omitempty"` -} - -func Check(checks []Checkable) (err error) { - errs := []error{} - for _, c := range checks { - if e := c.Healthy(); e != nil { - errs = append(errs, e) - } - } - - switch len(errs) { - case 0: - err = nil - case 1: - err = errs[0] - default: - err = fmt.Errorf("multiple health check failure: %v", errs) - } - - return -} - -func DefaultHealthyHandler(w http.ResponseWriter, r *http.Request) { - err := httputil.WriteJSONResponse(w, http.StatusOK, StatusResponse{ - Status: "ok", - }) - if err != nil { - // TODO(bobbyrullo): replace with logging from new logging pkg, - // once it lands. - log.Printf("Failed to write JSON response: %v", err) - } -} - -func DefaultUnhealthyHandler(w http.ResponseWriter, r *http.Request, err error) { - writeErr := httputil.WriteJSONResponse(w, http.StatusInternalServerError, StatusResponse{ - Status: "error", - Details: &StatusResponseDetails{ - Code: http.StatusInternalServerError, - Message: err.Error(), - }, - }) - if writeErr != nil { - // TODO(bobbyrullo): replace with logging from new logging pkg, - // once it lands. - log.Printf("Failed to write JSON response: %v", err) - } -} - -// ExpvarHandler is copied from https://golang.org/src/expvar/expvar.go, where it's sadly unexported. -func ExpvarHandler(w http.ResponseWriter, r *http.Request) { - w.Header().Set("Content-Type", "application/json; charset=utf-8") - fmt.Fprintf(w, "{\n") - first := true - expvar.Do(func(kv expvar.KeyValue) { - if !first { - fmt.Fprintf(w, ",\n") - } - first = false - fmt.Fprintf(w, "%q: %s", kv.Key, kv.Value) - }) - fmt.Fprintf(w, "\n}\n") -} diff --git a/vendor/github.com/coreos/pkg/httputil/README.md b/vendor/github.com/coreos/pkg/httputil/README.md deleted file mode 100644 index 44fa751c..00000000 --- a/vendor/github.com/coreos/pkg/httputil/README.md +++ /dev/null @@ -1,13 +0,0 @@ -httputil -==== - -Common code for dealing with HTTP. - -Includes: - -* Code for returning JSON responses. - -### Documentation - -Visit the docs on [gopkgdoc](http://godoc.org/github.com/coreos/pkg/httputil) - diff --git a/vendor/github.com/coreos/pkg/httputil/cookie.go b/vendor/github.com/coreos/pkg/httputil/cookie.go deleted file mode 100644 index c37a37bb..00000000 --- a/vendor/github.com/coreos/pkg/httputil/cookie.go +++ /dev/null @@ -1,21 +0,0 @@ -package httputil - -import ( - "net/http" - "time" -) - -// DeleteCookies effectively deletes all named cookies -// by wiping all data and setting to expire immediately. 
-func DeleteCookies(w http.ResponseWriter, cookieNames ...string) { - for _, n := range cookieNames { - c := &http.Cookie{ - Name: n, - Value: "", - Path: "/", - MaxAge: -1, - Expires: time.Time{}, - } - http.SetCookie(w, c) - } -} diff --git a/vendor/github.com/coreos/pkg/httputil/json.go b/vendor/github.com/coreos/pkg/httputil/json.go deleted file mode 100644 index 0b092350..00000000 --- a/vendor/github.com/coreos/pkg/httputil/json.go +++ /dev/null @@ -1,27 +0,0 @@ -package httputil - -import ( - "encoding/json" - "net/http" -) - -const ( - JSONContentType = "application/json" -) - -func WriteJSONResponse(w http.ResponseWriter, code int, resp interface{}) error { - enc, err := json.Marshal(resp) - if err != nil { - w.WriteHeader(http.StatusInternalServerError) - return err - } - - w.Header().Set("Content-Type", JSONContentType) - w.WriteHeader(code) - - _, err = w.Write(enc) - if err != nil { - return err - } - return nil -} diff --git a/vendor/github.com/coreos/pkg/timeutil/backoff.go b/vendor/github.com/coreos/pkg/timeutil/backoff.go deleted file mode 100644 index b34fb496..00000000 --- a/vendor/github.com/coreos/pkg/timeutil/backoff.go +++ /dev/null @@ -1,15 +0,0 @@ -package timeutil - -import ( - "time" -) - -func ExpBackoff(prev, max time.Duration) time.Duration { - if prev == 0 { - return time.Second - } - if prev > max/2 { - return max - } - return 2 * prev -} diff --git a/vendor/github.com/jonboulle/clockwork/.gitignore b/vendor/github.com/jonboulle/clockwork/.gitignore deleted file mode 100644 index 010c242b..00000000 --- a/vendor/github.com/jonboulle/clockwork/.gitignore +++ /dev/null @@ -1,25 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test - -*.swp diff --git a/vendor/github.com/jonboulle/clockwork/.travis.yml b/vendor/github.com/jonboulle/clockwork/.travis.yml deleted file mode 100644 index aefda90b..00000000 --- a/vendor/github.com/jonboulle/clockwork/.travis.yml +++ /dev/null @@ -1,5 +0,0 @@ -language: go -go: - - 1.3 - -sudo: false diff --git a/vendor/github.com/jonboulle/clockwork/LICENSE b/vendor/github.com/jonboulle/clockwork/LICENSE deleted file mode 100644 index 5c304d1a..00000000 --- a/vendor/github.com/jonboulle/clockwork/LICENSE +++ /dev/null @@ -1,201 +0,0 @@ -Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. 
- - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright {yyyy} {name of copyright owner} - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/vendor/github.com/jonboulle/clockwork/README.md b/vendor/github.com/jonboulle/clockwork/README.md deleted file mode 100644 index d43a6c79..00000000 --- a/vendor/github.com/jonboulle/clockwork/README.md +++ /dev/null @@ -1,61 +0,0 @@ -clockwork -========= - -[![Build Status](https://travis-ci.org/jonboulle/clockwork.png?branch=master)](https://travis-ci.org/jonboulle/clockwork) -[![godoc](https://godoc.org/github.com/jonboulle/clockwork?status.svg)](http://godoc.org/github.com/jonboulle/clockwork) - -a simple fake clock for golang - -# Usage - -Replace uses of the `time` package with the `clockwork.Clock` interface instead. - -For example, instead of using `time.Sleep` directly: - -``` -func my_func() { - time.Sleep(3 * time.Second) - do_something() -} -``` - -inject a clock and use its `Sleep` method instead: - -``` -func my_func(clock clockwork.Clock) { - clock.Sleep(3 * time.Second) - do_something() -} -``` - -Now you can easily test `my_func` with a `FakeClock`: - -``` -func TestMyFunc(t *testing.T) { - c := clockwork.NewFakeClock() - - // Start our sleepy function - my_func(c) - - // Ensure we wait until my_func is sleeping - c.BlockUntil(1) - - assert_state() - - // Advance the FakeClock forward in time - c.Advance(3) - - assert_state() -} -``` - -and in production builds, simply inject the real clock instead: -``` -my_func(clockwork.NewRealClock()) -``` - -See [example_test.go](example_test.go) for a full example. - -# Credits - -clockwork is inspired by @wickman's [threaded fake clock](https://gist.github.com/wickman/3840816), and the [Golang playground](http://blog.golang.org/playground#Faking time) diff --git a/vendor/github.com/jonboulle/clockwork/clockwork.go b/vendor/github.com/jonboulle/clockwork/clockwork.go deleted file mode 100644 index 9ec96ed2..00000000 --- a/vendor/github.com/jonboulle/clockwork/clockwork.go +++ /dev/null @@ -1,169 +0,0 @@ -package clockwork - -import ( - "sync" - "time" -) - -// Clock provides an interface that packages can use instead of directly -// using the time module, so that chronology-related behavior can be tested -type Clock interface { - After(d time.Duration) <-chan time.Time - Sleep(d time.Duration) - Now() time.Time -} - -// FakeClock provides an interface for a clock which can be -// manually advanced through time -type FakeClock interface { - Clock - // Advance advances the FakeClock to a new point in time, ensuring any existing - // sleepers are notified appropriately before returning - Advance(d time.Duration) - // BlockUntil will block until the FakeClock has the given number of - // sleepers (callers of Sleep or After) - BlockUntil(n int) -} - -// NewRealClock returns a Clock which simply delegates calls to the actual time -// package; it should be used by packages in production. -func NewRealClock() Clock { - return &realClock{} -} - -// NewFakeClock returns a FakeClock implementation which can be -// manually advanced through time for testing. The initial time of the -// FakeClock will be an arbitrary non-zero time. -func NewFakeClock() FakeClock { - // use a fixture that does not fulfill Time.IsZero() - return NewFakeClockAt(time.Date(1984, time.April, 4, 0, 0, 0, 0, time.UTC)) -} - -// NewFakeClockAt returns a FakeClock initialised at the given time.Time. 
-func NewFakeClockAt(t time.Time) FakeClock { - return &fakeClock{ - time: t, - } -} - -type realClock struct{} - -func (rc *realClock) After(d time.Duration) <-chan time.Time { - return time.After(d) -} - -func (rc *realClock) Sleep(d time.Duration) { - time.Sleep(d) -} - -func (rc *realClock) Now() time.Time { - return time.Now() -} - -type fakeClock struct { - sleepers []*sleeper - blockers []*blocker - time time.Time - - l sync.RWMutex -} - -// sleeper represents a caller of After or Sleep -type sleeper struct { - until time.Time - done chan time.Time -} - -// blocker represents a caller of BlockUntil -type blocker struct { - count int - ch chan struct{} -} - -// After mimics time.After; it waits for the given duration to elapse on the -// fakeClock, then sends the current time on the returned channel. -func (fc *fakeClock) After(d time.Duration) <-chan time.Time { - fc.l.Lock() - defer fc.l.Unlock() - now := fc.time - done := make(chan time.Time, 1) - if d.Nanoseconds() == 0 { - // special case - trigger immediately - done <- now - } else { - // otherwise, add to the set of sleepers - s := &sleeper{ - until: now.Add(d), - done: done, - } - fc.sleepers = append(fc.sleepers, s) - // and notify any blockers - fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) - } - return done -} - -// notifyBlockers notifies all the blockers waiting until the -// given number of sleepers are waiting on the fakeClock. It -// returns an updated slice of blockers (i.e. those still waiting) -func notifyBlockers(blockers []*blocker, count int) (newBlockers []*blocker) { - for _, b := range blockers { - if b.count == count { - close(b.ch) - } else { - newBlockers = append(newBlockers, b) - } - } - return -} - -// Sleep blocks until the given duration has passed on the fakeClock -func (fc *fakeClock) Sleep(d time.Duration) { - <-fc.After(d) -} - -// Time returns the current time of the fakeClock -func (fc *fakeClock) Now() time.Time { - fc.l.RLock() - t := fc.time - fc.l.RUnlock() - return t -} - -// Advance advances fakeClock to a new point in time, ensuring channels from any -// previous invocations of After are notified appropriately before returning -func (fc *fakeClock) Advance(d time.Duration) { - fc.l.Lock() - defer fc.l.Unlock() - end := fc.time.Add(d) - var newSleepers []*sleeper - for _, s := range fc.sleepers { - if end.Sub(s.until) >= 0 { - s.done <- end - } else { - newSleepers = append(newSleepers, s) - } - } - fc.sleepers = newSleepers - fc.blockers = notifyBlockers(fc.blockers, len(fc.sleepers)) - fc.time = end -} - -// BlockUntil will block until the fakeClock has the given number of sleepers -// (callers of Sleep or After) -func (fc *fakeClock) BlockUntil(n int) { - fc.l.Lock() - // Fast path: current number of sleepers is what we're looking for - if len(fc.sleepers) == n { - fc.l.Unlock() - return - } - // Otherwise, set up a new blocker - b := &blocker{ - count: n, - ch: make(chan struct{}), - } - fc.blockers = append(fc.blockers, b) - fc.l.Unlock() - <-b.ch -} diff --git a/vendor/github.com/mitchellh/mapstructure/.travis.yml b/vendor/github.com/mitchellh/mapstructure/.travis.yml new file mode 100644 index 00000000..1689c7d7 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/.travis.yml @@ -0,0 +1,8 @@ +language: go + +go: + - "1.11.x" + - tip + +script: + - go test diff --git a/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md new file mode 100644 index 00000000..3b3cb723 --- /dev/null +++ 
b/vendor/github.com/mitchellh/mapstructure/CHANGELOG.md @@ -0,0 +1,21 @@ +## 1.1.2 + +* Fix error when decode hook decodes interface implementation into interface + type. [GH-140] + +## 1.1.1 + +* Fix panic that can happen in `decodePtr` + +## 1.1.0 + +* Added `StringToIPHookFunc` to convert `string` to `net.IP` and `net.IPNet` [GH-133] +* Support struct to struct decoding [GH-137] +* If source map value is nil, then destination map value is nil (instead of empty) +* If source slice value is nil, then destination slice value is nil (instead of empty) +* If source pointer is nil, then destination pointer is set to nil (instead of + allocated zero value of type) + +## 1.0.0 + +* Initial tagged stable release. diff --git a/vendor/github.com/mitchellh/mapstructure/LICENSE b/vendor/github.com/mitchellh/mapstructure/LICENSE new file mode 100644 index 00000000..f9c841a5 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/vendor/github.com/mitchellh/mapstructure/README.md b/vendor/github.com/mitchellh/mapstructure/README.md new file mode 100644 index 00000000..0018dc7d --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/README.md @@ -0,0 +1,46 @@ +# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure) + +mapstructure is a Go library for decoding generic map values to structures +and vice versa, while providing helpful error handling. + +This library is most useful when decoding values from some data stream (JSON, +Gob, etc.) where you don't _quite_ know the structure of the underlying data +until you read a part of it. You can therefore read a `map[string]interface{}` +and use this library to decode it into the proper underlying native Go +structure. + +## Installation + +Standard `go get`: + +``` +$ go get github.com/mitchellh/mapstructure +``` + +## Usage & Example + +For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure). + +The `Decode` function has examples associated with it there. + +## But Why?! + +Go offers fantastic standard libraries for decoding formats such as JSON. +The standard method is to have a struct pre-created, and populate that struct +from the bytes of the encoded format. This is great, but the problem is if +you have configuration or an encoding that changes slightly depending on +specific fields. 
For example, consider this JSON: + +```json +{ + "type": "person", + "name": "Mitchell" +} +``` + +Perhaps we can't populate a specific structure without first reading +the "type" field from the JSON. We could always do two passes over the +decoding of the JSON (reading the "type" first, and the rest later). +However, it is much simpler to just decode this into a `map[string]interface{}` +structure, read the "type" key, then use something like this library +to decode it into the proper structure. diff --git a/vendor/github.com/mitchellh/mapstructure/decode_hooks.go b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go new file mode 100644 index 00000000..1f0abc65 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/decode_hooks.go @@ -0,0 +1,217 @@ +package mapstructure + +import ( + "errors" + "fmt" + "net" + "reflect" + "strconv" + "strings" + "time" +) + +// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns +// it into the proper DecodeHookFunc type, such as DecodeHookFuncType. +func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc { + // Create variables here so we can reference them with the reflect pkg + var f1 DecodeHookFuncType + var f2 DecodeHookFuncKind + + // Fill in the variables into this interface and the rest is done + // automatically using the reflect package. + potential := []interface{}{f1, f2} + + v := reflect.ValueOf(h) + vt := v.Type() + for _, raw := range potential { + pt := reflect.ValueOf(raw).Type() + if vt.ConvertibleTo(pt) { + return v.Convert(pt).Interface() + } + } + + return nil +} + +// DecodeHookExec executes the given decode hook. This should be used +// since it'll naturally degrade to the older backwards compatible DecodeHookFunc +// that took reflect.Kind instead of reflect.Type. +func DecodeHookExec( + raw DecodeHookFunc, + from reflect.Type, to reflect.Type, + data interface{}) (interface{}, error) { + switch f := typedDecodeHook(raw).(type) { + case DecodeHookFuncType: + return f(from, to, data) + case DecodeHookFuncKind: + return f(from.Kind(), to.Kind(), data) + default: + return nil, errors.New("invalid decode hook signature") + } +} + +// ComposeDecodeHookFunc creates a single DecodeHookFunc that +// automatically composes multiple DecodeHookFuncs. +// +// The composed funcs are called in order, with the result of the +// previous transformation. +func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + var err error + for _, f1 := range fs { + data, err = DecodeHookExec(f1, f, t, data) + if err != nil { + return nil, err + } + + // Modify the from kind to be correct with the new data + f = nil + if val := reflect.ValueOf(data); val.IsValid() { + f = val.Type() + } + } + + return data, nil + } +} + +// StringToSliceHookFunc returns a DecodeHookFunc that converts +// string to []string by splitting on the given sep. +func StringToSliceHookFunc(sep string) DecodeHookFunc { + return func( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + if f != reflect.String || t != reflect.Slice { + return data, nil + } + + raw := data.(string) + if raw == "" { + return []string{}, nil + } + + return strings.Split(raw, sep), nil + } +} + +// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts +// strings to time.Duration. 
+func StringToTimeDurationHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Duration(5)) { + return data, nil + } + + // Convert it by parsing + return time.ParseDuration(data.(string)) + } +} + +// StringToIPHookFunc returns a DecodeHookFunc that converts +// strings to net.IP +func StringToIPHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IP{}) { + return data, nil + } + + // Convert it by parsing + ip := net.ParseIP(data.(string)) + if ip == nil { + return net.IP{}, fmt.Errorf("failed parsing ip %v", data) + } + + return ip, nil + } +} + +// StringToIPNetHookFunc returns a DecodeHookFunc that converts +// strings to net.IPNet +func StringToIPNetHookFunc() DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(net.IPNet{}) { + return data, nil + } + + // Convert it by parsing + _, net, err := net.ParseCIDR(data.(string)) + return net, err + } +} + +// StringToTimeHookFunc returns a DecodeHookFunc that converts +// strings to time.Time. +func StringToTimeHookFunc(layout string) DecodeHookFunc { + return func( + f reflect.Type, + t reflect.Type, + data interface{}) (interface{}, error) { + if f.Kind() != reflect.String { + return data, nil + } + if t != reflect.TypeOf(time.Time{}) { + return data, nil + } + + // Convert it by parsing + return time.Parse(layout, data.(string)) + } +} + +// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to +// the decoder. +// +// Note that this is significantly different from the WeaklyTypedInput option +// of the DecoderConfig. +func WeaklyTypedHook( + f reflect.Kind, + t reflect.Kind, + data interface{}) (interface{}, error) { + dataVal := reflect.ValueOf(data) + switch t { + case reflect.String: + switch f { + case reflect.Bool: + if dataVal.Bool() { + return "1", nil + } + return "0", nil + case reflect.Float32: + return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil + case reflect.Int: + return strconv.FormatInt(dataVal.Int(), 10), nil + case reflect.Slice: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + if elemKind == reflect.Uint8 { + return string(dataVal.Interface().([]uint8)), nil + } + case reflect.Uint: + return strconv.FormatUint(dataVal.Uint(), 10), nil + } + } + + return data, nil +} diff --git a/vendor/github.com/mitchellh/mapstructure/error.go b/vendor/github.com/mitchellh/mapstructure/error.go new file mode 100644 index 00000000..47a99e5a --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/error.go @@ -0,0 +1,50 @@ +package mapstructure + +import ( + "errors" + "fmt" + "sort" + "strings" +) + +// Error implements the error interface and can represents multiple +// errors that occur in the course of a single decode. 
+type Error struct { + Errors []string +} + +func (e *Error) Error() string { + points := make([]string, len(e.Errors)) + for i, err := range e.Errors { + points[i] = fmt.Sprintf("* %s", err) + } + + sort.Strings(points) + return fmt.Sprintf( + "%d error(s) decoding:\n\n%s", + len(e.Errors), strings.Join(points, "\n")) +} + +// WrappedErrors implements the errwrap.Wrapper interface to make this +// return value more useful with the errwrap and go-multierror libraries. +func (e *Error) WrappedErrors() []error { + if e == nil { + return nil + } + + result := make([]error, len(e.Errors)) + for i, e := range e.Errors { + result[i] = errors.New(e) + } + + return result +} + +func appendErrors(errors []string, err error) []string { + switch e := err.(type) { + case *Error: + return append(errors, e.Errors...) + default: + return append(errors, e.Error()) + } +} diff --git a/vendor/github.com/mitchellh/mapstructure/go.mod b/vendor/github.com/mitchellh/mapstructure/go.mod new file mode 100644 index 00000000..d2a71256 --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/go.mod @@ -0,0 +1 @@ +module github.com/mitchellh/mapstructure diff --git a/vendor/github.com/mitchellh/mapstructure/mapstructure.go b/vendor/github.com/mitchellh/mapstructure/mapstructure.go new file mode 100644 index 00000000..256ee63f --- /dev/null +++ b/vendor/github.com/mitchellh/mapstructure/mapstructure.go @@ -0,0 +1,1149 @@ +// Package mapstructure exposes functionality to convert an arbitrary +// map[string]interface{} into a native Go structure. +// +// The Go structure can be arbitrarily complex, containing slices, +// other structs, etc. and the decoder will properly decode nested +// maps and so on into the proper structures in the native Go struct. +// See the examples to see what the decoder is capable of. +package mapstructure + +import ( + "encoding/json" + "errors" + "fmt" + "reflect" + "sort" + "strconv" + "strings" +) + +// DecodeHookFunc is the callback function that can be used for +// data transformations. See "DecodeHook" in the DecoderConfig +// struct. +// +// The type should be DecodeHookFuncType or DecodeHookFuncKind. +// Either is accepted. Types are a superset of Kinds (Types can return +// Kinds) and are generally a richer thing to use, but Kinds are simpler +// if you only need those. +// +// The reason DecodeHookFunc is multi-typed is for backwards compatibility: +// we started with Kinds and then realized Types were the better solution, +// but have a promise to not break backwards compat so we now support +// both. +type DecodeHookFunc interface{} + +// DecodeHookFuncType is a DecodeHookFunc which has complete information about +// the source and target types. +type DecodeHookFuncType func(reflect.Type, reflect.Type, interface{}) (interface{}, error) + +// DecodeHookFuncKind is a DecodeHookFunc which knows only the Kinds of the +// source and target types. +type DecodeHookFuncKind func(reflect.Kind, reflect.Kind, interface{}) (interface{}, error) + +// DecoderConfig is the configuration that is used to create a new decoder +// and allows customization of various aspects of decoding. +type DecoderConfig struct { + // DecodeHook, if set, will be called before any decoding and any + // type conversion (if WeaklyTypedInput is on). This lets you modify + // the values before they're set down onto the resulting struct. + // + // If an error is returned, the entire decode will fail with that + // error. 
+ DecodeHook DecodeHookFunc + + // If ErrorUnused is true, then it is an error for there to exist + // keys in the original map that were unused in the decoding process + // (extra keys). + ErrorUnused bool + + // ZeroFields, if set to true, will zero fields before writing them. + // For example, a map will be emptied before decoded values are put in + // it. If this is false, a map will be merged. + ZeroFields bool + + // If WeaklyTypedInput is true, the decoder will make the following + // "weak" conversions: + // + // - bools to string (true = "1", false = "0") + // - numbers to string (base 10) + // - bools to int/uint (true = 1, false = 0) + // - strings to int/uint (base implied by prefix) + // - int to bool (true if value != 0) + // - string to bool (accepts: 1, t, T, TRUE, true, True, 0, f, F, + // FALSE, false, False. Anything else is an error) + // - empty array = empty map and vice versa + // - negative numbers to overflowed uint values (base 10) + // - slice of maps to a merged map + // - single values are converted to slices if required. Each + // element is weakly decoded. For example: "4" can become []int{4} + // if the target type is an int slice. + // + WeaklyTypedInput bool + + // Metadata is the struct that will contain extra metadata about + // the decoding. If this is nil, then no metadata will be tracked. + Metadata *Metadata + + // Result is a pointer to the struct that will contain the decoded + // value. + Result interface{} + + // The tag name that mapstructure reads for field names. This + // defaults to "mapstructure" + TagName string +} + +// A Decoder takes a raw interface value and turns it into structured +// data, keeping track of rich error information along the way in case +// anything goes wrong. Unlike the basic top-level Decode method, you can +// more finely control how the Decoder behaves using the DecoderConfig +// structure. The top-level Decode method is just a convenience that sets +// up the most basic Decoder. +type Decoder struct { + config *DecoderConfig +} + +// Metadata contains information about decoding a structure that +// is tedious or difficult to get otherwise. +type Metadata struct { + // Keys are the keys of the structure which were successfully decoded + Keys []string + + // Unused is a slice of keys that were found in the raw value but + // weren't decoded since there was no matching field in the result interface + Unused []string +} + +// Decode takes an input structure and uses reflection to translate it to +// the output structure. output must be a pointer to a map or struct. +func Decode(input interface{}, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecode is the same as Decode but is shorthand to enable +// WeaklyTypedInput. See DecoderConfig for more info. +func WeakDecode(input, output interface{}) error { + config := &DecoderConfig{ + Metadata: nil, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// DecodeMetadata is the same as Decode, but is shorthand to +// enable metadata collection. See DecoderConfig for more info. 
+func DecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// WeakDecodeMetadata is the same as Decode, but is shorthand to +// enable both WeaklyTypedInput and metadata collection. See +// DecoderConfig for more info. +func WeakDecodeMetadata(input interface{}, output interface{}, metadata *Metadata) error { + config := &DecoderConfig{ + Metadata: metadata, + Result: output, + WeaklyTypedInput: true, + } + + decoder, err := NewDecoder(config) + if err != nil { + return err + } + + return decoder.Decode(input) +} + +// NewDecoder returns a new decoder for the given configuration. Once +// a decoder has been returned, the same configuration must not be used +// again. +func NewDecoder(config *DecoderConfig) (*Decoder, error) { + val := reflect.ValueOf(config.Result) + if val.Kind() != reflect.Ptr { + return nil, errors.New("result must be a pointer") + } + + val = val.Elem() + if !val.CanAddr() { + return nil, errors.New("result must be addressable (a pointer)") + } + + if config.Metadata != nil { + if config.Metadata.Keys == nil { + config.Metadata.Keys = make([]string, 0) + } + + if config.Metadata.Unused == nil { + config.Metadata.Unused = make([]string, 0) + } + } + + if config.TagName == "" { + config.TagName = "mapstructure" + } + + result := &Decoder{ + config: config, + } + + return result, nil +} + +// Decode decodes the given raw interface to the target pointer specified +// by the configuration. +func (d *Decoder) Decode(input interface{}) error { + return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) +} + +// Decodes an unknown data type into a specific reflection value. +func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) error { + var inputVal reflect.Value + if input != nil { + inputVal = reflect.ValueOf(input) + + // We need to check here if input is a typed nil. Typed nils won't + // match the "input == nil" below so we check that here. + if inputVal.Kind() == reflect.Ptr && inputVal.IsNil() { + input = nil + } + } + + if input == nil { + // If the data is nil, then we don't set anything, unless ZeroFields is set + // to true. + if d.config.ZeroFields { + outVal.Set(reflect.Zero(outVal.Type())) + + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + } + return nil + } + + if !inputVal.IsValid() { + // If the input value is invalid, then we just set the value + // to be the zero value. + outVal.Set(reflect.Zero(outVal.Type())) + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + return nil + } + + if d.config.DecodeHook != nil { + // We have a DecodeHook, so let's pre-process the input. 
+ var err error + input, err = DecodeHookExec( + d.config.DecodeHook, + inputVal.Type(), outVal.Type(), input) + if err != nil { + return fmt.Errorf("error decoding '%s': %s", name, err) + } + } + + var err error + outputKind := getKind(outVal) + switch outputKind { + case reflect.Bool: + err = d.decodeBool(name, input, outVal) + case reflect.Interface: + err = d.decodeBasic(name, input, outVal) + case reflect.String: + err = d.decodeString(name, input, outVal) + case reflect.Int: + err = d.decodeInt(name, input, outVal) + case reflect.Uint: + err = d.decodeUint(name, input, outVal) + case reflect.Float32: + err = d.decodeFloat(name, input, outVal) + case reflect.Struct: + err = d.decodeStruct(name, input, outVal) + case reflect.Map: + err = d.decodeMap(name, input, outVal) + case reflect.Ptr: + err = d.decodePtr(name, input, outVal) + case reflect.Slice: + err = d.decodeSlice(name, input, outVal) + case reflect.Array: + err = d.decodeArray(name, input, outVal) + case reflect.Func: + err = d.decodeFunc(name, input, outVal) + default: + // If we reached this point then we weren't able to decode it + return fmt.Errorf("%s: unsupported type: %s", name, outputKind) + } + + // If we reached here, then we successfully decoded SOMETHING, so + // mark the key as used if we're tracking metainput. + if d.config.Metadata != nil && name != "" { + d.config.Metadata.Keys = append(d.config.Metadata.Keys, name) + } + + return err +} + +// This decodes a basic type (bool, int, string, etc.) and sets the +// value to "data" of that type. +func (d *Decoder) decodeBasic(name string, data interface{}, val reflect.Value) error { + if val.IsValid() && val.Elem().IsValid() { + return d.decode(name, data, val.Elem()) + } + + dataVal := reflect.ValueOf(data) + + // If the input data is a pointer, and the assigned type is the dereference + // of that exact pointer, then indirect it so that we can assign it. 
+ // Example: *string to string + if dataVal.Kind() == reflect.Ptr && dataVal.Type().Elem() == val.Type() { + dataVal = reflect.Indirect(dataVal) + } + + if !dataVal.IsValid() { + dataVal = reflect.Zero(val.Type()) + } + + dataValType := dataVal.Type() + if !dataValType.AssignableTo(val.Type()) { + return fmt.Errorf( + "'%s' expected type '%s', got '%s'", + name, val.Type(), dataValType) + } + + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeString(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + converted := true + switch { + case dataKind == reflect.String: + val.SetString(dataVal.String()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetString("1") + } else { + val.SetString("0") + } + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatInt(dataVal.Int(), 10)) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatUint(dataVal.Uint(), 10)) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetString(strconv.FormatFloat(dataVal.Float(), 'f', -1, 64)) + case dataKind == reflect.Slice && d.config.WeaklyTypedInput, + dataKind == reflect.Array && d.config.WeaklyTypedInput: + dataType := dataVal.Type() + elemKind := dataType.Elem().Kind() + switch elemKind { + case reflect.Uint8: + var uints []uint8 + if dataKind == reflect.Array { + uints = make([]uint8, dataVal.Len(), dataVal.Len()) + for i := range uints { + uints[i] = dataVal.Index(i).Interface().(uint8) + } + } else { + uints = dataVal.Interface().([]uint8) + } + val.SetString(string(uints)) + default: + converted = false + } + default: + converted = false + } + + if !converted { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeInt(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetInt(dataVal.Int()) + case dataKind == reflect.Uint: + val.SetInt(int64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetInt(int64(dataVal.Float())) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetInt(1) + } else { + val.SetInt(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseInt(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetInt(i) + } else { + return fmt.Errorf("cannot parse '%s' as int: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Int64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetInt(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Int: + i := dataVal.Int() + if i < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %d overflows uint", + name, i) + } + val.SetUint(uint64(i)) + case dataKind == reflect.Uint: + 
val.SetUint(dataVal.Uint()) + case dataKind == reflect.Float32: + f := dataVal.Float() + if f < 0 && !d.config.WeaklyTypedInput { + return fmt.Errorf("cannot parse '%s', %f overflows uint", + name, f) + } + val.SetUint(uint64(f)) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetUint(1) + } else { + val.SetUint(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + i, err := strconv.ParseUint(dataVal.String(), 0, val.Type().Bits()) + if err == nil { + val.SetUint(i) + } else { + return fmt.Errorf("cannot parse '%s' as uint: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeBool(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + + switch { + case dataKind == reflect.Bool: + val.SetBool(dataVal.Bool()) + case dataKind == reflect.Int && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Int() != 0) + case dataKind == reflect.Uint && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Uint() != 0) + case dataKind == reflect.Float32 && d.config.WeaklyTypedInput: + val.SetBool(dataVal.Float() != 0) + case dataKind == reflect.String && d.config.WeaklyTypedInput: + b, err := strconv.ParseBool(dataVal.String()) + if err == nil { + val.SetBool(b) + } else if dataVal.String() == "" { + val.SetBool(false) + } else { + return fmt.Errorf("cannot parse '%s' as bool: %s", name, err) + } + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataKind := getKind(dataVal) + dataType := dataVal.Type() + + switch { + case dataKind == reflect.Int: + val.SetFloat(float64(dataVal.Int())) + case dataKind == reflect.Uint: + val.SetFloat(float64(dataVal.Uint())) + case dataKind == reflect.Float32: + val.SetFloat(dataVal.Float()) + case dataKind == reflect.Bool && d.config.WeaklyTypedInput: + if dataVal.Bool() { + val.SetFloat(1) + } else { + val.SetFloat(0) + } + case dataKind == reflect.String && d.config.WeaklyTypedInput: + f, err := strconv.ParseFloat(dataVal.String(), val.Type().Bits()) + if err == nil { + val.SetFloat(f) + } else { + return fmt.Errorf("cannot parse '%s' as float: %s", name, err) + } + case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number": + jn := data.(json.Number) + i, err := jn.Float64() + if err != nil { + return fmt.Errorf( + "error decoding json.Number into %s: %s", name, err) + } + val.SetFloat(i) + default: + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + + return nil +} + +func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // By default we overwrite keys in the current map + valMap := val + + // If the map is nil or we're purposely zeroing fields, make a new map + if valMap.IsNil() || d.config.ZeroFields { + // Make a new map to hold our result + mapType := reflect.MapOf(valKeyType, valElemType) + valMap = reflect.MakeMap(mapType) + } + + // Check input type and based on the input type jump to the proper func + dataVal := 
reflect.Indirect(reflect.ValueOf(data)) + switch dataVal.Kind() { + case reflect.Map: + return d.decodeMapFromMap(name, dataVal, val, valMap) + + case reflect.Struct: + return d.decodeMapFromStruct(name, dataVal, val, valMap) + + case reflect.Array, reflect.Slice: + if d.config.WeaklyTypedInput { + return d.decodeMapFromSlice(name, dataVal, val, valMap) + } + + fallthrough + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeMapFromSlice(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + // Special case for BC reasons (covered by tests) + if dataVal.Len() == 0 { + val.Set(valMap) + return nil + } + + for i := 0; i < dataVal.Len(); i++ { + err := d.decode( + fmt.Sprintf("%s[%d]", name, i), + dataVal.Index(i).Interface(), val) + if err != nil { + return err + } + } + + return nil +} + +func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + valType := val.Type() + valKeyType := valType.Key() + valElemType := valType.Elem() + + // Accumulate errors + errors := make([]string, 0) + + // If the input data is empty, then we just match what the input data is. + if dataVal.Len() == 0 { + if dataVal.IsNil() { + if !val.IsNil() { + val.Set(dataVal) + } + } else { + // Set to empty allocated value + val.Set(valMap) + } + + return nil + } + + for _, k := range dataVal.MapKeys() { + fieldName := fmt.Sprintf("%s[%s]", name, k) + + // First decode the key into the proper type + currentKey := reflect.Indirect(reflect.New(valKeyType)) + if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { + errors = appendErrors(errors, err) + continue + } + + // Next decode the data into the proper type + v := dataVal.MapIndex(k).Interface() + currentVal := reflect.Indirect(reflect.New(valElemType)) + if err := d.decode(fieldName, v, currentVal); err != nil { + errors = appendErrors(errors, err) + continue + } + + valMap.SetMapIndex(currentKey, currentVal) + } + + // Set the built up map to the value + val.Set(valMap) + + // If we had errors, return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { + typ := dataVal.Type() + for i := 0; i < typ.NumField(); i++ { + // Get the StructField first since this is a cheap operation. If the + // field is unexported, then ignore it. + f := typ.Field(i) + if f.PkgPath != "" { + continue + } + + // Next get the actual value of this field and verify it is assignable + // to the map value. + v := dataVal.Field(i) + if !v.Type().AssignableTo(valMap.Type().Elem()) { + return fmt.Errorf("cannot assign type '%s' to map value field of type '%s'", v.Type(), valMap.Type().Elem()) + } + + tagValue := f.Tag.Get(d.config.TagName) + tagParts := strings.Split(tagValue, ",") + + // Determine the name of the key in the map + keyName := f.Name + if tagParts[0] != "" { + if tagParts[0] == "-" { + continue + } + keyName = tagParts[0] + } + + // If "squash" is specified in the tag, we squash the field down. 
+ squash := false + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + if squash && v.Kind() != reflect.Struct { + return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) + } + + switch v.Kind() { + // this is an embedded struct, so handle it differently + case reflect.Struct: + x := reflect.New(v.Type()) + x.Elem().Set(v) + + vType := valMap.Type() + vKeyType := vType.Key() + vElemType := vType.Elem() + mType := reflect.MapOf(vKeyType, vElemType) + vMap := reflect.MakeMap(mType) + + err := d.decode(keyName, x.Interface(), vMap) + if err != nil { + return err + } + + if squash { + for _, k := range vMap.MapKeys() { + valMap.SetMapIndex(k, vMap.MapIndex(k)) + } + } else { + valMap.SetMapIndex(reflect.ValueOf(keyName), vMap) + } + + default: + valMap.SetMapIndex(reflect.ValueOf(keyName), v) + } + } + + if val.CanAddr() { + val.Set(valMap) + } + + return nil +} + +func (d *Decoder) decodePtr(name string, data interface{}, val reflect.Value) error { + // If the input data is nil, then we want to just set the output + // pointer to be nil as well. + isNil := data == nil + if !isNil { + switch v := reflect.Indirect(reflect.ValueOf(data)); v.Kind() { + case reflect.Chan, + reflect.Func, + reflect.Interface, + reflect.Map, + reflect.Ptr, + reflect.Slice: + isNil = v.IsNil() + } + } + if isNil { + if !val.IsNil() && val.CanSet() { + nilValue := reflect.New(val.Type()).Elem() + val.Set(nilValue) + } + + return nil + } + + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + valType := val.Type() + valElemType := valType.Elem() + if val.CanSet() { + realVal := val + if realVal.IsNil() || d.config.ZeroFields { + realVal = reflect.New(valElemType) + } + + if err := d.decode(name, data, reflect.Indirect(realVal)); err != nil { + return err + } + + val.Set(realVal) + } else { + if err := d.decode(name, data, reflect.Indirect(val)); err != nil { + return err + } + } + return nil +} + +func (d *Decoder) decodeFunc(name string, data interface{}, val reflect.Value) error { + // Create an element of the concrete (non pointer) type and decode + // into that. Then set the value of the pointer to this type. + dataVal := reflect.Indirect(reflect.ValueOf(data)) + if val.Type() != dataVal.Type() { + return fmt.Errorf( + "'%s' expected type '%s', got unconvertible type '%s'", + name, val.Type(), dataVal.Type()) + } + val.Set(dataVal) + return nil +} + +func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + sliceType := reflect.SliceOf(valElemType) + + valSlice := val + if valSlice.IsNil() || d.config.ZeroFields { + if d.config.WeaklyTypedInput { + switch { + // Slice and array we use the normal logic + case dataValKind == reflect.Slice, dataValKind == reflect.Array: + break + + // Empty maps turn into empty slices + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.MakeSlice(sliceType, 0, 0)) + return nil + } + // Create slice of maps of other sizes + return d.decodeSlice(name, []interface{}{data}, val) + + case dataValKind == reflect.String && valElemType.Kind() == reflect.Uint8: + return d.decodeSlice(name, []byte(dataVal.String()), val) + + // All other types we try to convert to the slice type + // and "lift" it into it. i.e. a string becomes a string slice. 
+ default: + // Just re-try this function with data as a slice. + return d.decodeSlice(name, []interface{}{data}, val) + } + } + + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + + // If the input value is empty, then don't allocate since non-nil != nil + if dataVal.Len() == 0 { + return nil + } + + // Make a new slice to hold our result, same size as the original data. + valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + for valSlice.Len() <= i { + valSlice = reflect.Append(valSlice, reflect.Zero(valElemType)) + } + currentField := valSlice.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the slice we built up + val.Set(valSlice) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + dataValKind := dataVal.Kind() + valType := val.Type() + valElemType := valType.Elem() + arrayType := reflect.ArrayOf(valType.Len(), valElemType) + + valArray := val + + if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { + // Check input type + if dataValKind != reflect.Array && dataValKind != reflect.Slice { + if d.config.WeaklyTypedInput { + switch { + // Empty maps turn into empty arrays + case dataValKind == reflect.Map: + if dataVal.Len() == 0 { + val.Set(reflect.Zero(arrayType)) + return nil + } + + // All other types we try to convert to the array type + // and "lift" it into it. i.e. a string becomes a string array. + default: + // Just re-try this function with data as a slice. + return d.decodeArray(name, []interface{}{data}, val) + } + } + + return fmt.Errorf( + "'%s': source data must be an array or slice, got %s", name, dataValKind) + + } + if dataVal.Len() > arrayType.Len() { + return fmt.Errorf( + "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) + + } + + // Make a new array to hold our result, same size as the original data. + valArray = reflect.New(arrayType).Elem() + } + + // Accumulate any errors + errors := make([]string, 0) + + for i := 0; i < dataVal.Len(); i++ { + currentData := dataVal.Index(i).Interface() + currentField := valArray.Index(i) + + fieldName := fmt.Sprintf("%s[%d]", name, i) + if err := d.decode(fieldName, currentData, currentField); err != nil { + errors = appendErrors(errors, err) + } + } + + // Finally, set the value to the array we built up + val.Set(valArray) + + // If there were errors, we return those + if len(errors) > 0 { + return &Error{errors} + } + + return nil +} + +func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { + dataVal := reflect.Indirect(reflect.ValueOf(data)) + + // If the type of the value to write to and the data match directly, + // then we just set it directly instead of recursing into the structure. 
+ if dataVal.Type() == val.Type() { + val.Set(dataVal) + return nil + } + + dataValKind := dataVal.Kind() + switch dataValKind { + case reflect.Map: + return d.decodeStructFromMap(name, dataVal, val) + + case reflect.Struct: + // Not the most efficient way to do this but we can optimize later if + // we want to. To convert from struct to struct we go to map first + // as an intermediary. + m := make(map[string]interface{}) + mval := reflect.Indirect(reflect.ValueOf(&m)) + if err := d.decodeMapFromStruct(name, dataVal, mval, mval); err != nil { + return err + } + + result := d.decodeStructFromMap(name, mval, val) + return result + + default: + return fmt.Errorf("'%s' expected a map, got '%s'", name, dataVal.Kind()) + } +} + +func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) error { + dataValType := dataVal.Type() + if kind := dataValType.Key().Kind(); kind != reflect.String && kind != reflect.Interface { + return fmt.Errorf( + "'%s' needs a map with string keys, has '%s' keys", + name, dataValType.Key().Kind()) + } + + dataValKeys := make(map[reflect.Value]struct{}) + dataValKeysUnused := make(map[interface{}]struct{}) + for _, dataValKey := range dataVal.MapKeys() { + dataValKeys[dataValKey] = struct{}{} + dataValKeysUnused[dataValKey.Interface()] = struct{}{} + } + + errors := make([]string, 0) + + // This slice will keep track of all the structs we'll be decoding. + // There can be more than one struct if there are embedded structs + // that are squashed. + structs := make([]reflect.Value, 1, 5) + structs[0] = val + + // Compile the list of all the fields that we're going to be decoding + // from all the structs. + type field struct { + field reflect.StructField + val reflect.Value + } + fields := []field{} + for len(structs) > 0 { + structVal := structs[0] + structs = structs[1:] + + structType := structVal.Type() + + for i := 0; i < structType.NumField(); i++ { + fieldType := structType.Field(i) + fieldKind := fieldType.Type.Kind() + + // If "squash" is specified in the tag, we squash the field down. + squash := false + tagParts := strings.Split(fieldType.Tag.Get(d.config.TagName), ",") + for _, tag := range tagParts[1:] { + if tag == "squash" { + squash = true + break + } + } + + if squash { + if fieldKind != reflect.Struct { + errors = appendErrors(errors, + fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldKind)) + } else { + structs = append(structs, structVal.FieldByName(fieldType.Name)) + } + continue + } + + // Normal struct field, store it away + fields = append(fields, field{fieldType, structVal.Field(i)}) + } + } + + // for fieldType, field := range fields { + for _, f := range fields { + field, fieldValue := f.field, f.val + fieldName := field.Name + + tagValue := field.Tag.Get(d.config.TagName) + tagValue = strings.SplitN(tagValue, ",", 2)[0] + if tagValue != "" { + fieldName = tagValue + } + + rawMapKey := reflect.ValueOf(fieldName) + rawMapVal := dataVal.MapIndex(rawMapKey) + if !rawMapVal.IsValid() { + // Do a slower search by iterating over each key and + // doing case-insensitive search. + for dataValKey := range dataValKeys { + mK, ok := dataValKey.Interface().(string) + if !ok { + // Not a string key + continue + } + + if strings.EqualFold(mK, fieldName) { + rawMapKey = dataValKey + rawMapVal = dataVal.MapIndex(dataValKey) + break + } + } + + if !rawMapVal.IsValid() { + // There was no matching key in the map for the value in + // the struct. Just ignore. 
+ continue + } + } + + // Delete the key we're using from the unused map so we stop tracking + delete(dataValKeysUnused, rawMapKey.Interface()) + + if !fieldValue.IsValid() { + // This should never happen + panic("field is not valid") + } + + // If we can't set the field, then it is unexported or something, + // and we just continue onwards. + if !fieldValue.CanSet() { + continue + } + + // If the name is empty string, then we're at the root, and we + // don't dot-join the fields. + if name != "" { + fieldName = fmt.Sprintf("%s.%s", name, fieldName) + } + + if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { + errors = appendErrors(errors, err) + } + } + + if d.config.ErrorUnused && len(dataValKeysUnused) > 0 { + keys := make([]string, 0, len(dataValKeysUnused)) + for rawKey := range dataValKeysUnused { + keys = append(keys, rawKey.(string)) + } + sort.Strings(keys) + + err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) + errors = appendErrors(errors, err) + } + + if len(errors) > 0 { + return &Error{errors} + } + + // Add the unused keys to the list of unused keys if we're tracking metadata + if d.config.Metadata != nil { + for rawKey := range dataValKeysUnused { + key := rawKey.(string) + if name != "" { + key = fmt.Sprintf("%s.%s", name, key) + } + + d.config.Metadata.Unused = append(d.config.Metadata.Unused, key) + } + } + + return nil +} + +func getKind(val reflect.Value) reflect.Kind { + kind := val.Kind() + + switch { + case kind >= reflect.Int && kind <= reflect.Int64: + return reflect.Int + case kind >= reflect.Uint && kind <= reflect.Uint64: + return reflect.Uint + case kind >= reflect.Float32 && kind <= reflect.Float64: + return reflect.Float32 + default: + return kind + } +}
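
Note (not part of the diff above): a minimal, self-contained sketch of how the vendored mapstructure API defined in this file is typically driven. DecodeMetadata wraps NewDecoder/Decoder.Decode and records which input keys were consumed versus left unused; struct fields are matched through the default "mapstructure" tag, as set up in NewDecoder above. The HTTPOrigin type and the literal input map below are hypothetical illustrations for this sketch, not code taken from this change.

    // Sketch only, assuming github.com/mitchellh/mapstructure is vendored as in this diff.
    package main

    import (
        "fmt"

        "github.com/mitchellh/mapstructure"
    )

    // HTTPOrigin is a hypothetical stand-in for a config struct decoded from
    // a raw map (e.g. unmarshaled JSON) via mapstructure tags.
    type HTTPOrigin struct {
        URLString string `mapstructure:"url_string"`
        Retries   uint64 `mapstructure:"retries"`
    }

    func main() {
        input := map[string]interface{}{
            "url_string": "https://localhost:8080",
            "retries":    uint64(5),
            "unknown":    true, // reported in Metadata.Unused, not an error
        }

        var origin HTTPOrigin
        var md mapstructure.Metadata
        // DecodeMetadata builds a Decoder with Metadata collection enabled
        // and decodes the map into the struct.
        if err := mapstructure.DecodeMetadata(input, &origin, &md); err != nil {
            fmt.Println("decode failed:", err)
            return
        }
        fmt.Printf("decoded %+v, used keys %v, unused keys %v\n", origin, md.Keys, md.Unused)
    }

WeakDecodeMetadata behaves the same way but sets WeaklyTypedInput, so string inputs such as "5" would also decode into the uint64 field.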