TUN-5989: Add in-memory otlp exporter

parent 9cde11f8e0
commit def8f57dbc
@@ -14,6 +14,7 @@ import (
 	"github.com/google/uuid"
 	"github.com/pkg/errors"

+	"github.com/cloudflare/cloudflared/tracing"
 	"github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 	"github.com/cloudflare/cloudflared/websocket"
 )
@@ -121,7 +122,7 @@ func (t Type) String() string {

 // OriginProxy is how data flows from cloudflared to the origin services running behind it.
 type OriginProxy interface {
-	ProxyHTTP(w ResponseWriter, req *http.Request, isWebsocket bool) error
+	ProxyHTTP(w ResponseWriter, tr *tracing.TracedRequest, isWebsocket bool) error
 	ProxyTCP(ctx context.Context, rwa ReadWriteAcker, req *TCPRequest) error
 }

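For orientation (not part of the commit): a minimal sketch of how a caller of the updated OriginProxy interface is expected to wrap the plain *http.Request before proxying, mirroring what the h2mux, HTTP/2 and QUIC connection paths below now do. The handle function and its proxy/w parameters are hypothetical names used only for this example.

	package example

	import (
		"net/http"

		"github.com/cloudflare/cloudflared/connection"
		"github.com/cloudflare/cloudflared/tracing"
	)

	// handle is a hypothetical caller: wrapping the request lets the origin proxy
	// record spans tied to the request's cf-trace-id header; without that header
	// NewTracedRequest falls back to a no-op tracer.
	func handle(proxy connection.OriginProxy, w connection.ResponseWriter, r *http.Request) error {
		tr := tracing.NewTracedRequest(r)
		return proxy.ProxyHTTP(w, tr, false)
	}
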
@@ -12,6 +12,7 @@ import (
 	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/assert"

+	"github.com/cloudflare/cloudflared/tracing"
 	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 	"github.com/cloudflare/cloudflared/websocket"
 )
@@ -55,9 +56,10 @@ type mockOriginProxy struct{}

 func (moc *mockOriginProxy) ProxyHTTP(
 	w ResponseWriter,
-	req *http.Request,
+	tr *tracing.TracedRequest,
 	isWebsocket bool,
 ) error {
+	req := tr.Request
 	if isWebsocket {
 		switch req.URL.Path {
 		case "/ws/echo":
@@ -12,6 +12,7 @@ import (
 	"golang.org/x/sync/errgroup"

 	"github.com/cloudflare/cloudflared/h2mux"
+	"github.com/cloudflare/cloudflared/tracing"
 	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 	"github.com/cloudflare/cloudflared/websocket"
 )
@@ -233,7 +234,7 @@ func (h *h2muxConnection) ServeStream(stream *h2mux.MuxedStream) error {
 		return err
 	}

-	err = originProxy.ProxyHTTP(respWriter, req, sourceConnectionType == TypeWebsocket)
+	err = originProxy.ProxyHTTP(respWriter, tracing.NewTracedRequest(req), sourceConnectionType == TypeWebsocket)
 	if err != nil {
 		respWriter.WriteErrorResponse()
 	}
@@ -15,6 +15,7 @@ import (
 	"github.com/rs/zerolog"
 	"golang.org/x/net/http2"

+	"github.com/cloudflare/cloudflared/tracing"
 	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 )

@@ -130,7 +131,9 @@ func (c *HTTP2Connection) ServeHTTP(w http.ResponseWriter, r *http.Request) {

 	case TypeWebsocket, TypeHTTP:
 		stripWebsocketUpgradeHeader(r)
-		if err := originProxy.ProxyHTTP(respWriter, r, connType == TypeWebsocket); err != nil {
+		// Check for tracing on request
+		tr := tracing.NewTracedRequest(r)
+		if err := originProxy.ProxyHTTP(respWriter, tr, connType == TypeWebsocket); err != nil {
 			err := fmt.Errorf("Failed to proxy HTTP: %w", err)
 			c.log.Error().Err(err)
 			respWriter.WriteErrorResponse()
@@ -20,6 +20,7 @@ import (
 	"github.com/cloudflare/cloudflared/datagramsession"
 	"github.com/cloudflare/cloudflared/ingress"
 	quicpogs "github.com/cloudflare/cloudflared/quic"
+	"github.com/cloudflare/cloudflared/tracing"
 	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 )

@@ -181,13 +182,13 @@ func (q *QUICConnection) handleDataStream(stream *quicpogs.RequestServerStream)
 	}
 	switch connectRequest.Type {
 	case quicpogs.ConnectionTypeHTTP, quicpogs.ConnectionTypeWebsocket:
-		req, err := buildHTTPRequest(connectRequest, stream)
+		tracedReq, err := buildHTTPRequest(connectRequest, stream)
 		if err != nil {
 			return err
 		}

 		w := newHTTPResponseAdapter(stream)
-		return originProxy.ProxyHTTP(w, req, connectRequest.Type == quicpogs.ConnectionTypeWebsocket)
+		return originProxy.ProxyHTTP(w, tracedReq, connectRequest.Type == quicpogs.ConnectionTypeWebsocket)
 	case quicpogs.ConnectionTypeTCP:
 		rwa := &streamReadWriteAcker{stream}
 		return originProxy.ProxyTCP(context.Background(), rwa, &TCPRequest{Dest: connectRequest.Dest})
@@ -305,7 +306,7 @@ func (hrw httpResponseAdapter) WriteErrorResponse(err error) {
 	hrw.WriteConnectResponseData(err, quicpogs.Metadata{Key: "HttpStatus", Val: strconv.Itoa(http.StatusBadGateway)})
 }

-func buildHTTPRequest(connectRequest *quicpogs.ConnectRequest, body io.ReadCloser) (*http.Request, error) {
+func buildHTTPRequest(connectRequest *quicpogs.ConnectRequest, body io.ReadCloser) (*tracing.TracedRequest, error) {
 	metadata := connectRequest.MetadataMap()
 	dest := connectRequest.Dest
 	method := metadata[HTTPMethodKey]
@@ -345,7 +346,10 @@ func buildHTTPRequest(connectRequest *quicpogs.ConnectRequest, body io.ReadClose
 		req.Body = http.NoBody
 	}
 	stripWebsocketUpgradeHeader(req)
-	return req, err
+
+	// Check for tracing on request
+	tracedReq := tracing.NewTracedRequest(req)
+	return tracedReq, err
 }

 func setContentLength(req *http.Request) error {
@@ -24,6 +24,7 @@ import (

 	"github.com/cloudflare/cloudflared/datagramsession"
 	quicpogs "github.com/cloudflare/cloudflared/quic"
+	"github.com/cloudflare/cloudflared/tracing"
 	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 )

@@ -219,9 +220,10 @@ func quicServer(

 type mockOriginProxyWithRequest struct{}

-func (moc *mockOriginProxyWithRequest) ProxyHTTP(w ResponseWriter, r *http.Request, isWebsocket bool) error {
+func (moc *mockOriginProxyWithRequest) ProxyHTTP(w ResponseWriter, tr *tracing.TracedRequest, isWebsocket bool) error {
 	// These are a series of crude tests to ensure the headers and http related data is transferred from
 	// metadata.
+	r := tr.Request
 	if r.Method == "" {
 		return errors.New("method not sent")
 	}
@@ -478,7 +480,7 @@ func TestBuildHTTPRequest(t *testing.T) {
 			req, err := buildHTTPRequest(test.connectRequest, test.body)
 			assert.NoError(t, err)
 			test.req = test.req.WithContext(req.Context())
-			assert.Equal(t, test.req, req)
+			assert.Equal(t, test.req, req.Request)
 		})
 	}
 }

go.mod (15 changed lines)
@@ -26,14 +26,21 @@ require (
 	github.com/prometheus/client_model v0.2.0
 	github.com/rivo/tview v0.0.0-20200712113419-c65badfc3d92
 	github.com/rs/zerolog v1.20.0
-	github.com/stretchr/testify v1.7.0
+	github.com/stretchr/testify v1.7.1
 	github.com/urfave/cli/v2 v2.3.0
+	go.opentelemetry.io/contrib/propagators v0.22.0
+	go.opentelemetry.io/otel v1.6.3
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3
+	go.opentelemetry.io/otel/sdk v1.6.3
+	go.opentelemetry.io/otel/trace v1.6.3
+	go.opentelemetry.io/proto/otlp v0.15.0
 	go.uber.org/automaxprocs v1.4.0
 	golang.org/x/crypto v0.0.0-20210921155107-089bfa567519
 	golang.org/x/net v0.0.0-20220114011407-0dd24b26b47d
 	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
 	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
 	golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b
+	google.golang.org/protobuf v1.28.0
 	gopkg.in/coreos/go-oidc.v2 v2.2.1
 	gopkg.in/natefinch/lumberjack.v2 v2.0.0
 	gopkg.in/square/go-jose.v2 v2.6.0
@@ -58,10 +65,13 @@ require (
 	github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
 	github.com/francoispqt/gojay v1.2.13 // indirect
 	github.com/gdamore/encoding v1.0.0 // indirect
+	github.com/go-logr/logr v1.2.3 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
 	github.com/gobwas/httphead v0.0.0-20200921212729-da3d93bc3c58 // indirect
 	github.com/gobwas/pool v0.2.1 // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
 	github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
 	github.com/lucasb-eyer/go-colorful v1.0.3 // indirect
@@ -89,8 +99,7 @@ require (
 	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
 	google.golang.org/genproto v0.0.0-20211223182754-3ac035c7e7cb // indirect
-	google.golang.org/grpc v1.43.0 // indirect
-	google.golang.org/protobuf v1.27.1 // indirect
+	google.golang.org/grpc v1.45.0 // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
 )

go.sum (41 changed lines)
@@ -100,10 +100,10 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
 github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
 github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
+github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894 h1:JLaf/iINcLyjwbtTsCJjc6rtlASgHeIJPrB6QmwURnA=
 github.com/certifi/gocertifi v0.0.0-20200211180108-c7c1fbc02894/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
-github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
@@ -211,6 +211,11 @@ github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG
 github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
 github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
 github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
+github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
@@ -233,6 +238,8 @@ github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 h1:
 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3/go.mod h1:nPpo7qLxd6XL3hWJG/O60sR8ZKfMCiIoNap5GvD12KU=
 github.com/golang-jwt/jwt/v4 v4.0.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
 github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
+github.com/golang/glog v1.0.0 h1:nfP3RFugxnNRyKgeWd4oI1nYvXpxrx8ck8ZrcizshdQ=
+github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
 github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
 github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -283,8 +290,9 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
 github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
 github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
-github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
 github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
+github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
 github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
 github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -336,6 +344,8 @@ github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:Fecb
 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
 github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
 github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
 github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro=
@@ -512,7 +522,6 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
 github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
 github.com/rs/zerolog v1.20.0 h1:38k9hgtUBdxFwE34yS8rTHmHBa4eN16E4DJlv177LNs=
 github.com/rs/zerolog v1.20.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo=
-github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo=
 github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
@@ -561,8 +570,9 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
 github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
 github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
 github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
-github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY=
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
 github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ=
 github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
@@ -587,7 +597,22 @@ go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
 go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
+go.opentelemetry.io/contrib/propagators v0.22.0 h1:KGdv58M2//veiYLIhb31mofaI2LgkIPXXAZVeYVyfd8=
+go.opentelemetry.io/contrib/propagators v0.22.0/go.mod h1:xGOuXr6lLIF9BXipA4pm6UuOSI0M98U6tsI3khbOiwU=
+go.opentelemetry.io/otel v1.0.0-RC2/go.mod h1:w1thVQ7qbAy8MHb0IFj8a5Q2QU0l2ksf8u/CN8m3NOM=
+go.opentelemetry.io/otel v1.6.3 h1:FLOfo8f9JzFVFVyU+MSRJc2HdEAXQgm7pIv2uFKRSZE=
+go.opentelemetry.io/otel v1.6.3/go.mod h1:7BgNga5fNlF/iZjG06hM3yofffp0ofKCDwSXx1GC4dI=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.6.3/go.mod h1:NEu79Xo32iVb+0gVNV8PMd7GoWqnyDXRlj04yFjqz40=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3 h1:4/UjHWMVVc5VwX/KAtqJOHErKigMCH8NexChMuanb/o=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.6.3/go.mod h1:UJmXdiVVBaZ63umRUTwJuCMAV//GCMvDiQwn703/GoY=
+go.opentelemetry.io/otel/sdk v1.6.3 h1:prSHYdwCQOX5DrsEzxowH3nLhoAzEBdZhvrR79scfLs=
+go.opentelemetry.io/otel/sdk v1.6.3/go.mod h1:A4iWF7HTXa+GWL/AaqESz28VuSBIcZ+0CV+IzJ5NMiQ=
+go.opentelemetry.io/otel/trace v1.0.0-RC2/go.mod h1:JPQ+z6nNw9mqEGT8o3eoPTdnNI+Aj5JcxEsVGREIAy4=
+go.opentelemetry.io/otel/trace v1.6.3 h1:IqN4L+5b0mPNjdXIiZ90Ni4Bl5BRkDQywePLWemd9bc=
+go.opentelemetry.io/otel/trace v1.6.3/go.mod h1:GNJQusJlUgZl9/TQBPKU/Y/ty+0iVB5fjhKeJGZPGFs=
 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
+go.opentelemetry.io/proto/otlp v0.15.0 h1:h0bKrvdrT/9sBwEJ6iWUqT/N/xPcS66bL4u3isneJ6w=
+go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
 go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
 go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0=
 go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
@@ -789,6 +814,7 @@ golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7w
 golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
 golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -1029,8 +1055,10 @@ google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnD
 google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
 google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
 google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k=
-google.golang.org/grpc v1.43.0 h1:Eeu7bZtDZ2DpRCsLhUlcrLnvYaMK1Gz86a+hMVvELmM=
+google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
 google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
+google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
+google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
 google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
 google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -1044,8 +1072,9 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
 google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
 google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
 google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
-google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
 google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
+google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
+google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
 gopkg.in/DataDog/dd-trace-go.v1 v1.34.0/go.mod h1:HtrC65fyJ6lWazShCC9rlOeiTSZJ0XtZhkwjZM2WpC4=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

@@ -21,6 +21,7 @@ import (
 	"github.com/cloudflare/cloudflared/connection"
 	"github.com/cloudflare/cloudflared/ingress"
 	"github.com/cloudflare/cloudflared/proxy"
+	"github.com/cloudflare/cloudflared/tracing"
 	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 )

@@ -353,7 +354,7 @@ func proxyHTTP(originProxy connection.OriginProxy, hostname string) (*http.Respo
 		return nil, err
 	}

-	err = originProxy.ProxyHTTP(respWriter, req, false)
+	err = originProxy.ProxyHTTP(respWriter, tracing.NewTracedRequest(req), false)
 	if err != nil {
 		return nil, err
 	}
@@ -604,7 +605,7 @@ func TestPersistentConnection(t *testing.T) {
 		respWriter, err := connection.NewHTTP2RespWriter(req, wsRespReadWriter, connection.TypeWebsocket, &log)
 		require.NoError(t, err)

-		err = originProxy.ProxyHTTP(respWriter, req, true)
+		err = originProxy.ProxyHTTP(respWriter, tracing.NewTracedRequest(req), true)
 		require.NoError(t, err)
 	}()

@@ -10,11 +10,13 @@ import (

 	"github.com/pkg/errors"
 	"github.com/rs/zerolog"
+	"go.opentelemetry.io/otel/attribute"

 	"github.com/cloudflare/cloudflared/carrier"
 	"github.com/cloudflare/cloudflared/cfio"
 	"github.com/cloudflare/cloudflared/connection"
 	"github.com/cloudflare/cloudflared/ingress"
+	"github.com/cloudflare/cloudflared/tracing"
 	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 	"github.com/cloudflare/cloudflared/websocket"
 )
@@ -59,16 +61,18 @@ func NewOriginProxy(
 // a simple roundtrip or a tcp/websocket dial depending on ingres rule setup.
 func (p *Proxy) ProxyHTTP(
 	w connection.ResponseWriter,
-	req *http.Request,
+	tr *tracing.TracedRequest,
 	isWebsocket bool,
 ) error {
 	incrementRequests()
 	defer decrementConcurrentRequests()

+	req := tr.Request
 	cfRay := connection.FindCfRayHeader(req)
 	lbProbe := connection.IsLBProbeRequest(req)
 	p.appendTagHeaders(req)

+	_, ruleSpan := tr.Tracer().Start(req.Context(), "ingress_match")
 	rule, ruleNum := p.ingressRules.FindMatchingRule(req.Host, req.URL.Path)
 	logFields := logFields{
 		cfRay: cfRay,
@@ -76,6 +80,8 @@ func (p *Proxy) ProxyHTTP(
 		rule: ruleNum,
 	}
 	p.logRequest(req, logFields)
+	ruleSpan.SetAttributes(attribute.Int("rule-num", ruleNum))
+	ruleSpan.End()

 	switch originProxy := rule.Service.(type) {
 	case ingress.HTTPOriginProxy:
@@ -92,7 +98,6 @@ func (p *Proxy) ProxyHTTP(
 			return err
 		}
 		return nil
-
 	case ingress.StreamBasedOriginProxy:
 		dest, err := getDestFromRule(rule, req)
 		if err != nil {
@@ -27,6 +27,7 @@ import (
 	"github.com/cloudflare/cloudflared/hello"
 	"github.com/cloudflare/cloudflared/ingress"
 	"github.com/cloudflare/cloudflared/logger"
+	"github.com/cloudflare/cloudflared/tracing"
 	tunnelpogs "github.com/cloudflare/cloudflared/tunnelrpc/pogs"
 )

@@ -151,7 +152,7 @@ func testProxyHTTP(proxy connection.OriginProxy) func(t *testing.T) {
 		req, err := http.NewRequest(http.MethodGet, "http://localhost:8080", nil)
 		require.NoError(t, err)

-		err = proxy.ProxyHTTP(responseWriter, req, false)
+		err = proxy.ProxyHTTP(responseWriter, tracing.NewTracedRequest(req), false)
 		require.NoError(t, err)
 		for _, tag := range testTags {
 			assert.Equal(t, tag.Value, req.Header.Get(TagHeaderNamePrefix+tag.Name))
@@ -178,7 +179,7 @@ func testProxyWebsocket(proxy connection.OriginProxy) func(t *testing.T) {

 		errGroup, ctx := errgroup.WithContext(ctx)
 		errGroup.Go(func() error {
-			err = proxy.ProxyHTTP(responseWriter, req, true)
+			err = proxy.ProxyHTTP(responseWriter, tracing.NewTracedRequest(req), true)
 			require.NoError(t, err)

 			require.Equal(t, http.StatusSwitchingProtocols, responseWriter.Code)
@@ -239,7 +240,7 @@ func testProxySSE(proxy connection.OriginProxy) func(t *testing.T) {
 		wg.Add(1)
 		go func() {
 			defer wg.Done()
-			err = proxy.ProxyHTTP(responseWriter, req, false)
+			err = proxy.ProxyHTTP(responseWriter, tracing.NewTracedRequest(req), false)
 			require.NoError(t, err)

 			require.Equal(t, http.StatusOK, responseWriter.Code)
@@ -351,7 +352,7 @@ func runIngressTestScenarios(t *testing.T, unvalidatedIngress []config.Unvalidat
 		req, err := http.NewRequest(http.MethodGet, test.url, nil)
 		require.NoError(t, err)

-		err = proxy.ProxyHTTP(responseWriter, req, false)
+		err = proxy.ProxyHTTP(responseWriter, tracing.NewTracedRequest(req), false)
 		require.NoError(t, err)

 		assert.Equal(t, test.expectedStatus, responseWriter.Code)
@@ -398,7 +399,7 @@ func TestProxyError(t *testing.T) {
 	req, err := http.NewRequest(http.MethodGet, "http://127.0.0.1", nil)
 	assert.NoError(t, err)

-	assert.Error(t, proxy.ProxyHTTP(responseWriter, req, false))
+	assert.Error(t, proxy.ProxyHTTP(responseWriter, tracing.NewTracedRequest(req), false))
 }

 type replayer struct {
@@ -676,7 +677,7 @@ func TestConnections(t *testing.T) {
 				rwa := connection.NewHTTPResponseReadWriterAcker(respWriter, req)
 				err = proxy.ProxyTCP(ctx, rwa, &connection.TCPRequest{Dest: dest})
 			} else {
-				err = proxy.ProxyHTTP(respWriter, req, test.args.connectionType == connection.TypeWebsocket)
+				err = proxy.ProxyHTTP(respWriter, tracing.NewTracedRequest(req), test.args.connectionType == connection.TypeWebsocket)
 			}

 			cancel()
@@ -0,0 +1,90 @@
+package tracing
+
+import (
+	"context"
+	"encoding/base64"
+	"errors"
+	"sync"
+
+	coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
+	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+	"google.golang.org/protobuf/proto"
+)
+
+const (
+	maxTraceAmount = 20
+)
+
+var (
+	errNoTraces = errors.New("no traces recorded to be exported")
+)
+
+type InMemoryClient interface {
+	// Spans returns a copy of the list of in-memory stored spans as a base64
+	// encoded otlp protobuf string.
+	Spans() (string, error)
+}
+
+// InMemoryOtlpClient is a client implementation for otlptrace.Client
+type InMemoryOtlpClient struct {
+	mu    sync.Mutex
+	spans []*tracepb.ResourceSpans
+}
+
+func (mc *InMemoryOtlpClient) Start(_ context.Context) error {
+	return nil
+}
+
+func (mc *InMemoryOtlpClient) Stop(_ context.Context) error {
+	return nil
+}
+
+// UploadTraces adds the provided list of spans to the in-memory list.
+func (mc *InMemoryOtlpClient) UploadTraces(_ context.Context, protoSpans []*tracepb.ResourceSpans) error {
+	mc.mu.Lock()
+	defer mc.mu.Unlock()
+	// Catch to make sure too many traces aren't being added to response header.
+	// Returning nil makes sure we don't fail to send the traces we already recorded.
+	if len(mc.spans)+len(protoSpans) > maxTraceAmount {
+		return nil
+	}
+	mc.spans = append(mc.spans, protoSpans...)
+	return nil
+}
+
+// Spans returns the list of in-memory stored spans as a base64 encoded otlp protobuf string.
+func (mc *InMemoryOtlpClient) Spans() (string, error) {
+	mc.mu.Lock()
+	defer mc.mu.Unlock()
+	if len(mc.spans) <= 0 {
+		return "", errNoTraces
+	}
+	pbRequest := &coltracepb.ExportTraceServiceRequest{
+		ResourceSpans: mc.spans,
+	}
+	data, err := proto.Marshal(pbRequest)
+	if err != nil {
+		return "", err
+	}
+	return base64.StdEncoding.EncodeToString(data), nil
+}
+
+// NoopOtlpClient is a client implementation for otlptrace.Client that does nothing
+type NoopOtlpClient struct{}
+
+func (mc *NoopOtlpClient) Start(_ context.Context) error {
+	return nil
+}
+
+func (mc *NoopOtlpClient) Stop(_ context.Context) error {
+	return nil
+}
+
+func (mc *NoopOtlpClient) UploadTraces(_ context.Context, _ []*tracepb.ResourceSpans) error {
+	return nil
+}
+
+// Spans always returns no traces error
+func (mc *NoopOtlpClient) Spans() (string, error) {
+	return "", errNoTraces
+}

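A brief sketch (an assumption based on how tracing.go later in this commit wires things up) of how the in-memory client is meant to be used: it is handed to otlptrace.New as the transport, a synchronous exporter flushes finished spans straight into it via UploadTraces, and Spans() then returns the base64-encoded OTLP payload. The function name exampleInMemoryExport is illustrative only.

	package tracing

	import (
		"context"

		"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
		tracesdk "go.opentelemetry.io/otel/sdk/trace"
	)

	func exampleInMemoryExport(ctx context.Context) (string, error) {
		mc := new(InMemoryOtlpClient)
		// The otlptrace exporter delivers spans by calling mc.UploadTraces.
		exp, err := otlptrace.New(ctx, mc)
		if err != nil {
			return "", err
		}
		// WithSyncer exports synchronously, so spans land in mc as soon as they end.
		tp := tracesdk.NewTracerProvider(tracesdk.WithSyncer(exp))
		_, span := tp.Tracer("example").Start(ctx, "example-span")
		span.End()
		// Base64-encoded ExportTraceServiceRequest containing the recorded span.
		return mc.Spans()
	}
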
@@ -0,0 +1,161 @@
+package tracing
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	"github.com/stretchr/testify/assert"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
+	"go.opentelemetry.io/otel/trace"
+	commonpb "go.opentelemetry.io/proto/otlp/common/v1"
+	resourcepb "go.opentelemetry.io/proto/otlp/resource/v1"
+	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+const (
+	resourceSchemaUrl   = "http://example.com/custom-resource-schema"
+	instrumentSchemaUrl = semconv.SchemaURL
+)
+
+var (
+	traceId      = []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F}
+	spanId       = []byte{0xFF, 0xFE, 0xFD, 0xFC, 0xFB, 0xFA, 0xF9, 0xF8}
+	parentSpanId = []byte{0x0F, 0x0E, 0x0D, 0x0C, 0x0B, 0x0A, 0x09, 0x08}
+	startTime    = time.Date(2022, 4, 4, 0, 0, 0, 0, time.UTC)
+	endTime      = startTime.Add(5 * time.Second)
+
+	traceState, _ = trace.ParseTraceState("key1=val1,key2=val2")
+	instrScope    = &commonpb.InstrumentationScope{Name: "go.opentelemetry.io/test/otel", Version: "v1.6.0"}
+	otlpKeyValues = []*commonpb.KeyValue{
+		{
+			Key: "string_key",
+			Value: &commonpb.AnyValue{
+				Value: &commonpb.AnyValue_StringValue{
+					StringValue: "string value",
+				},
+			},
+		},
+		{
+			Key: "bool_key",
+			Value: &commonpb.AnyValue{
+				Value: &commonpb.AnyValue_BoolValue{
+					BoolValue: true,
+				},
+			},
+		},
+	}
+	otlpResource = &resourcepb.Resource{
+		Attributes: []*commonpb.KeyValue{
+			{
+				Key: "service.name",
+				Value: &commonpb.AnyValue{
+					Value: &commonpb.AnyValue_StringValue{
+						StringValue: "service-name",
+					},
+				},
+			},
+		},
+	}
+)
+
+var _ otlptrace.Client = (*InMemoryOtlpClient)(nil)
+var _ InMemoryClient = (*InMemoryOtlpClient)(nil)
+var _ otlptrace.Client = (*NoopOtlpClient)(nil)
+var _ InMemoryClient = (*NoopOtlpClient)(nil)
+
+func TestUploadTraces(t *testing.T) {
+	client := &InMemoryOtlpClient{}
+	spans := createResourceSpans([]*tracepb.Span{createOtlpSpan(traceId)})
+	spans2 := createResourceSpans([]*tracepb.Span{createOtlpSpan(traceId)})
+	err := client.UploadTraces(context.Background(), spans)
+	assert.NoError(t, err)
+	err = client.UploadTraces(context.Background(), spans2)
+	assert.NoError(t, err)
+	assert.Len(t, client.spans, 2)
+}
+
+func TestSpans(t *testing.T) {
+	client := &InMemoryOtlpClient{}
+	spans := createResourceSpans([]*tracepb.Span{createOtlpSpan(traceId)})
+	err := client.UploadTraces(context.Background(), spans)
+	assert.NoError(t, err)
+	assert.Len(t, client.spans, 1)
+	enc, err := client.Spans()
+	assert.NoError(t, err)
+	expected := "CsECCiAKHgoMc2VydmljZS5uYW1lEg4KDHNlcnZpY2UtbmFtZRLxAQonCh1nby5vcGVudGVsZW1ldHJ5LmlvL3Rlc3Qvb3RlbBIGdjEuNi4wEp0BChAAAQIDBAUGBwgJCgsMDQ4PEgj//v38+/r5+BoTa2V5MT12YWwxLGtleTI9dmFsMiIIDw4NDAsKCQgqCnRyYWNlX25hbWUwATkAANJvaYjiFkEA8teZaojiFkocCgpzdHJpbmdfa2V5Eg4KDHN0cmluZyB2YWx1ZUoOCghib29sX2tleRICEAF6EhIOc3RhdHVzIG1lc3NhZ2UYARomaHR0cHM6Ly9vcGVudGVsZW1ldHJ5LmlvL3NjaGVtYXMvMS43LjAaKWh0dHA6Ly9leGFtcGxlLmNvbS9jdXN0b20tcmVzb3VyY2Utc2NoZW1h"
+	assert.Equal(t, expected, enc)
+}
+
+func TestSpansEmpty(t *testing.T) {
+	client := &InMemoryOtlpClient{}
+	err := client.UploadTraces(context.Background(), []*tracepb.ResourceSpans{})
+	assert.NoError(t, err)
+	assert.Len(t, client.spans, 0)
+	_, err = client.Spans()
+	assert.ErrorIs(t, err, errNoTraces)
+}
+
+func TestSpansNil(t *testing.T) {
+	client := &InMemoryOtlpClient{}
+	err := client.UploadTraces(context.Background(), nil)
+	assert.NoError(t, err)
+	assert.Len(t, client.spans, 0)
+	_, err = client.Spans()
+	assert.ErrorIs(t, err, errNoTraces)
+}
+
+func TestSpansTooManySpans(t *testing.T) {
+	client := &InMemoryOtlpClient{}
+	for i := 0; i < maxTraceAmount+1; i++ {
+		spans := createResourceSpans([]*tracepb.Span{createOtlpSpan(traceId)})
+		err := client.UploadTraces(context.Background(), spans)
+		assert.NoError(t, err)
+	}
+	assert.Len(t, client.spans, maxTraceAmount)
+	_, err := client.Spans()
+	assert.NoError(t, err)
+}
+
+func createResourceSpans(spans []*tracepb.Span) []*tracepb.ResourceSpans {
+	return []*tracepb.ResourceSpans{createResourceSpan(spans)}
+}
+
+func createResourceSpan(spans []*tracepb.Span) *tracepb.ResourceSpans {
+	return &tracepb.ResourceSpans{
+		Resource: otlpResource,
+		ScopeSpans: []*tracepb.ScopeSpans{
+			{
+				Scope:     instrScope,
+				Spans:     spans,
+				SchemaUrl: instrumentSchemaUrl,
+			},
+		},
+		InstrumentationLibrarySpans: nil,
+		SchemaUrl:                   resourceSchemaUrl,
+	}
+}
+
+func createOtlpSpan(tid []byte) *tracepb.Span {
+	return &tracepb.Span{
+		TraceId:                tid,
+		SpanId:                 spanId,
+		TraceState:             traceState.String(),
+		ParentSpanId:           parentSpanId,
+		Name:                   "trace_name",
+		Kind:                   tracepb.Span_SPAN_KIND_INTERNAL,
+		StartTimeUnixNano:      uint64(startTime.UnixNano()),
+		EndTimeUnixNano:        uint64(endTime.UnixNano()),
+		Attributes:             otlpKeyValues,
+		DroppedAttributesCount: 0,
+		Events:                 nil,
+		DroppedEventsCount:     0,
+		Links:                  nil,
+		DroppedLinksCount:      0,
+		Status: &tracepb.Status{
+			Message: "status message",
+			Code:    tracepb.Status_STATUS_CODE_OK,
+		},
+	}
+}

@ -0,0 +1,116 @@
|
||||||
|
package tracing
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"errors"
|
||||||
|
"net/http"
|
||||||
|
|
||||||
|
otelContrib "go.opentelemetry.io/contrib/propagators/Jaeger"
|
||||||
|
"go.opentelemetry.io/otel"
|
||||||
|
"go.opentelemetry.io/otel/attribute"
|
||||||
|
"go.opentelemetry.io/otel/codes"
|
||||||
|
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
|
||||||
|
"go.opentelemetry.io/otel/propagation"
|
||||||
|
"go.opentelemetry.io/otel/sdk/resource"
|
||||||
|
tracesdk "go.opentelemetry.io/otel/sdk/trace"
|
||||||
|
semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
|
||||||
|
"go.opentelemetry.io/otel/trace"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
service = "cloudflared"
|
||||||
|
tracerInstrumentName = "origin"
|
||||||
|
|
||||||
|
tracerContextName = "cf-trace-id"
|
||||||
|
tracerContextNameOverride = "uber-trace-id"
|
||||||
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
Http2TransportAttribute = trace.WithAttributes(TransportAttributeKey.String("http2"))
|
||||||
|
QuicTransportAttribute = trace.WithAttributes(TransportAttributeKey.String("quic"))
|
||||||
|
|
||||||
|
TransportAttributeKey = attribute.Key("transport")
|
||||||
|
TrafficAttributeKey = attribute.Key("traffic")
|
||||||
|
|
||||||
|
errNoopTracerProvider = errors.New("noop tracer provider records no spans")
|
||||||
|
)
|
||||||
|
|
||||||
|
func init() {
|
||||||
|
// Register the jaeger propagator globally.
|
||||||
|
otel.SetTextMapPropagator(otelContrib.Jaeger{})
|
||||||
|
}
|
||||||
|
|
||||||
|
type TracedRequest struct {
|
||||||
|
*http.Request
|
||||||
|
trace.TracerProvider
|
||||||
|
exporter InMemoryClient
|
||||||
|
}
|
||||||
|
|
||||||
|
// NewTracedRequest creates a new tracer for the current request context.
|
||||||
|
func NewTracedRequest(req *http.Request) *TracedRequest {
|
||||||
|
ctx, exists := extractTrace(req)
|
||||||
|
if !exists {
|
||||||
|
return &TracedRequest{req, trace.NewNoopTracerProvider(), &NoopOtlpClient{}}
|
||||||
|
}
|
||||||
|
mc := new(InMemoryOtlpClient)
|
||||||
|
exp, err := otlptrace.New(req.Context(), mc)
|
||||||
|
if err != nil {
|
||||||
|
return &TracedRequest{req, trace.NewNoopTracerProvider(), &NoopOtlpClient{}}
|
||||||
|
}
|
||||||
|
tp := tracesdk.NewTracerProvider(
|
||||||
|
// We want to dump to in-memory exporter immediately
|
||||||
|
tracesdk.WithSyncer(exp),
|
||||||
|
// Record information about this application in a Resource.
|
||||||
|
tracesdk.WithResource(resource.NewWithAttributes(
|
||||||
|
semconv.SchemaURL,
|
||||||
|
semconv.ServiceNameKey.String(service),
|
||||||
|
)),
|
||||||
|
)
|
||||||
|
|
||||||
|
return &TracedRequest{req.WithContext(ctx), tp, mc}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (cft *TracedRequest) Tracer() trace.Tracer {
|
||||||
|
return cft.TracerProvider.Tracer(tracerInstrumentName)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Spans returns the spans as base64 encoded protobuf otlp traces.
|
||||||
|
func (cft *TracedRequest) Spans() (string, error) {
|
||||||
|
return cft.exporter.Spans()
|
||||||
|
}
|
||||||
|
|
||||||
|
// EndWithStatus will set a status for the span and then end it.
|
||||||
|
func EndWithStatus(span trace.Span, code codes.Code, status string) {
|
||||||
|
if span == nil {
|
||||||
|
return
|
||||||
|
}
|
||||||
|
span.SetStatus(code, status)
|
||||||
|
span.End()
|
||||||
|
}
|
||||||
|
|
||||||
|
// extractTrace attempts to check for a cf-trace-id from a request header.
|
||||||
|
func extractTrace(req *http.Request) (context.Context, bool) {
|
||||||
|
// Only add tracing for requests with appropriately tagged headers
|
||||||
|
remoteTraces := req.Header.Values(tracerContextName)
|
||||||
|
if len(remoteTraces) <= 0 {
|
||||||
|
// Strip the cf-trace-id header
|
||||||
|
req.Header.Del(tracerContextName)
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
|
||||||
|
traceHeader := make(map[string]string, 1)
|
||||||
|
for _, t := range remoteTraces {
|
||||||
|
// Override the 'cf-trace-id' as 'uber-trace-id' so the jaeger propagator can extract it.
|
||||||
|
// Last entry wins if multiple provided
|
||||||
|
traceHeader[tracerContextNameOverride] = t
|
||||||
|
}
|
||||||
|
|
||||||
|
// Strip the cf-trace-id header
|
||||||
|
req.Header.Del(tracerContextName)
|
||||||
|
|
||||||
|
if traceHeader[tracerContextNameOverride] == "" {
|
||||||
|
return nil, false
|
||||||
|
}
|
||||||
|
remoteCtx := otel.GetTextMapPropagator().Extract(req.Context(), propagation.MapCarrier(traceHeader))
|
||||||
|
return remoteCtx, true
|
||||||
|
}
|
|
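For illustration only, a minimal sketch (not part of this commit) of how these helpers are meant to be combined by a caller: wrap the request, start a span from the per-request tracer, end it with a status, and serialize the recorded spans. The handler, span, and header names are assumptions.

```
package example

import (
	"net/http"

	"go.opentelemetry.io/otel/codes"

	"github.com/cloudflare/cloudflared/tracing"
)

func handleOrigin(w http.ResponseWriter, req *http.Request) {
	// Without a cf-trace-id header this wraps the request with a no-op
	// tracer, and Spans() will return an error.
	tr := tracing.NewTracedRequest(req)

	ctx, span := tr.Tracer().Start(tr.Context(), "proxy_to_origin") // span name assumed
	_ = ctx // a real proxy would pass ctx downstream

	// ... proxy the request to the origin here ...

	tracing.EndWithStatus(span, codes.Ok, "")

	// Serialize the in-memory spans (base64 protobuf) so they can be
	// returned to the caller; the header name is an assumption.
	if spans, err := tr.Spans(); err == nil {
		w.Header().Set("cf-trace-spans", spans)
	}
}
```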
@ -0,0 +1,50 @@
package tracing

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/stretchr/testify/assert"
	tracesdk "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/trace"
)

func TestNewCfTracer(t *testing.T) {
	req := httptest.NewRequest("GET", "http://localhost", nil)
	req.Header.Add(tracerContextName, "14cb070dde8e51fc5ae8514e69ba42ca:b38f1bf5eae406f3:0:1")
	tr := NewTracedRequest(req)
	assert.NotNil(t, tr)
	assert.IsType(t, tracesdk.NewTracerProvider(), tr.TracerProvider)
	assert.IsType(t, &InMemoryOtlpClient{}, tr.exporter)
}

func TestNewCfTracerMultiple(t *testing.T) {
	req := httptest.NewRequest("GET", "http://localhost", nil)
	req.Header.Add(tracerContextName, "1241ce3ecdefc68854e8514e69ba42ca:b38f1bf5eae406f3:0:1")
	req.Header.Add(tracerContextName, "14cb070dde8e51fc5ae8514e69ba42ca:b38f1bf5eae406f3:0:1")
	tr := NewTracedRequest(req)
	assert.NotNil(t, tr)
	assert.IsType(t, tracesdk.NewTracerProvider(), tr.TracerProvider)
	assert.IsType(t, &InMemoryOtlpClient{}, tr.exporter)
}

func TestNewCfTracerNilHeader(t *testing.T) {
	req := httptest.NewRequest("GET", "http://localhost", nil)
	req.Header[http.CanonicalHeaderKey(tracerContextName)] = nil
	tr := NewTracedRequest(req)
	assert.NotNil(t, tr)
	assert.IsType(t, trace.NewNoopTracerProvider(), tr.TracerProvider)
	assert.IsType(t, &NoopOtlpClient{}, tr.exporter)
}

func TestNewCfTracerInvalidHeaders(t *testing.T) {
	req := httptest.NewRequest("GET", "http://localhost", nil)
	for _, test := range [][]string{nil, {""}} {
		req.Header[http.CanonicalHeaderKey(tracerContextName)] = test
		tr := NewTracedRequest(req)
		assert.NotNil(t, tr)
		assert.IsType(t, trace.NewNoopTracerProvider(), tr.TracerProvider)
		assert.IsType(t, &NoopOtlpClient{}, tr.exporter)
	}
}
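As the tests above show, tracing is opt-in: a request only gets a real tracer when it carries a jaeger-style "trace-id:span-id:parent-span-id:flags" value under the cf-trace-id header. A minimal sketch of a caller opting in (not part of this commit; the URL is a placeholder and the header value mirrors the test fixtures):

```
package example

import "net/http"

func newTracedGet() (*http.Request, error) {
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8080/", nil)
	if err != nil {
		return nil, err
	}
	// jaeger-style trace context: trace-id:span-id:parent-span-id:flags
	req.Header.Add("cf-trace-id", "14cb070dde8e51fc5ae8514e69ba42ca:b38f1bf5eae406f3:0:1")
	return req, nil
}
```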
@ -0,0 +1,29 @@
run:
  timeout: 1m
  tests: true

linters:
  disable-all: true
  enable:
    - asciicheck
    - deadcode
    - errcheck
    - forcetypeassert
    - gocritic
    - gofmt
    - goimports
    - gosimple
    - govet
    - ineffassign
    - misspell
    - revive
    - staticcheck
    - structcheck
    - typecheck
    - unused
    - varcheck

issues:
  exclude-use-default: false
  max-issues-per-linter: 0
  max-same-issues: 10
@ -0,0 +1,6 @@
# CHANGELOG

## v1.0.0-rc1

This is the first logged release. Major changes (including breaking changes)
have occurred since earlier tags.
@ -0,0 +1,17 @@
# Contributing

Logr is open to pull-requests, provided they fit within the intended scope of
the project. Specifically, this library aims to be VERY small and minimalist,
with no external dependencies.

## Compatibility

This project intends to follow [semantic versioning](http://semver.org) and
is very strict about compatibility. Any proposed changes MUST follow those
rules.

## Performance

As a logging library, logr must be as light-weight as possible. Any proposed
code change must include results of running the [benchmark](./benchmark)
before and after the change.
@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "{}"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright {yyyy} {name of copyright owner}

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@ -0,0 +1,282 @@
# A minimal logging API for Go

[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr)

logr offers an(other) opinion on how Go programs and libraries can do logging
without becoming coupled to a particular logging implementation. This is not
an implementation of logging - it is an API. In fact it is two APIs with two
different sets of users.

The `Logger` type is intended for application and library authors. It provides
a relatively small API which can be used everywhere you want to emit logs. It
defers the actual act of writing logs (to files, to stdout, or whatever) to the
`LogSink` interface.

The `LogSink` interface is intended for logging library implementers. It is a
pure interface which can be implemented by logging frameworks to provide the actual logging
functionality.

This decoupling allows application and library developers to write code in
terms of `logr.Logger` (which has very low dependency fan-out) while the
implementation of logging is managed "up stack" (e.g. in or near `main()`.)
Application developers can then switch out implementations as necessary.

Many people assert that libraries should not be logging, and as such efforts
like this are pointless. Those people are welcome to convince the authors of
the tens-of-thousands of libraries that *DO* write logs that they are all
wrong. In the meantime, logr takes a more practical approach.

## Typical usage

Somewhere, early in an application's life, it will make a decision about which
logging library (implementation) it actually wants to use. Something like:

```
func main() {
  // ... other setup code ...

  // Create the "root" logger. We have chosen the "logimpl" implementation,
  // which takes some initial parameters and returns a logr.Logger.
  logger := logimpl.New(param1, param2)

  // ... other setup code ...
```

Most apps will call into other libraries, create structures to govern the flow,
etc. The `logr.Logger` object can be passed to these other libraries, stored
in structs, or even used as a package-global variable, if needed. For example:

```
app := createTheAppObject(logger)
app.Run()
```

Outside of this early setup, no other packages need to know about the choice of
implementation. They write logs in terms of the `logr.Logger` that they
received:

```
type appObject struct {
  // ... other fields ...
  logger logr.Logger
  // ... other fields ...
}

func (app *appObject) Run() {
  app.logger.Info("starting up", "timestamp", time.Now())

  // ... app code ...
```

## Background

If the Go standard library had defined an interface for logging, this project
probably would not be needed. Alas, here we are.

### Inspiration

Before you consider this package, please read [this blog post by the
inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what
he has to say, and it largely aligns with our own experiences.

### Differences from Dave's ideas

The main differences are:

1. Dave basically proposes doing away with the notion of a logging API in favor
of `fmt.Printf()`. We disagree, especially when you consider things like output
locations, timestamps, file and line decorations, and structured logging. This
package restricts the logging API to just 2 types of logs: info and error.

Info logs are things you want to tell the user which are not errors. Error
logs are, well, errors. If your code receives an `error` from a subordinate
function call and is logging that `error` *and not returning it*, use error
logs.

2. Verbosity-levels on info logs. This gives developers a chance to indicate
arbitrary grades of importance for info logs, without assigning names with
semantic meaning such as "warning", "trace", and "debug." Superficially this
may feel very similar, but the primary difference is the lack of semantics.
Because verbosity is a numerical value, it's safe to assume that an app running
with higher verbosity means more (and less important) logs will be generated.
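For illustration only (this sketch is not part of the upstream README), the two
log types and V-levels look like this at a call site; `logger` is any
`logr.Logger` and the identifiers are placeholders:

```
// Routine progress: shown at default verbosity.
logger.Info("reconcile complete", "object", objName)

// Chattier detail: only emitted if the sink enables V(1) or higher.
logger.V(1).Info("cache refreshed", "entries", entryCount)

// An error that is handled here rather than returned to the caller.
logger.Error(err, "failed to refresh cache", "attempt", attempt)
```
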
## Implementations (non-exhaustive)

There are implementations for the following logging libraries:

- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)

## FAQ

### Conceptual

#### Why structured logging?

- **Structured logs are more easily queryable**: Since you've got
  key-value pairs, it's much easier to query your structured logs for
  particular values by filtering on the contents of a particular key --
  think searching request logs for error codes, Kubernetes reconcilers for
  the name and namespace of the reconciled object, etc.

- **Structured logging makes it easier to have cross-referenceable logs**:
  Similarly to searchability, if you maintain conventions around your
  keys, it becomes easy to gather all log lines related to a particular
  concept.

- **Structured logs allow better dimensions of filtering**: if you have
  structure to your logs, you've got more precise control over how much
  information is logged -- you might choose in a particular configuration
  to log certain keys but not others, only log lines where a certain key
  matches a certain value, etc., instead of just having v-levels and names
  to key off of.

- **Structured logs better represent structured data**: sometimes, the
  data that you want to log is inherently structured (think tuple-link
  objects.) Structured logs allow you to preserve that structure when
  outputting.

#### Why V-levels?

**V-levels give operators an easy way to control the chattiness of log
operations**. V-levels provide a way for a given package to distinguish
the relative importance or verbosity of a given log message. Then, if
a particular logger or package is logging too many messages, the user
of the package can simply change the v-levels for that library.

#### Why not named levels, like Info/Warning/Error?

Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences
from Dave's ideas](#differences-from-daves-ideas).

#### Why not allow format strings, too?

**Format strings negate many of the benefits of structured logs**:

- They're not easily searchable without resorting to fuzzy searching,
  regular expressions, etc.

- They don't store structured data well, since contents are flattened into
  a string.

- They're not cross-referenceable.

- They don't compress easily, since the message is not constant.

(Unless you turn positional parameters into key-value pairs with numerical
keys, at which point you've gotten key-value logging with meaningless
keys.)

### Practical

#### Why key-value pairs, and not a map?

Key-value pairs are *much* easier to optimize, especially around
allocations. Zap (a structured logger that inspired logr's interface) has
[performance measurements](https://github.com/uber-go/zap#performance)
that show this quite nicely.

While the interface ends up being a little less obvious, you get
potentially better performance, plus avoid making users type
`map[string]string{}` every time they want to log.

#### What if my V-levels differ between libraries?

That's fine. Control your V-levels on a per-logger basis, and use the
`WithName` method to pass different loggers to different libraries.

Generally, you should take care to ensure that you have relatively
consistent V-levels within a given logger, however, as this makes deciding
on what verbosity of logs to request easier.

#### But I really want to use a format string!

That's not actually a question. Assuming your question is "how do
I convert my mental model of logging with format strings to logging with
constant messages":

1. Figure out what the error actually is, as you'd write in a TL;DR style,
   and use that as a message.

2. For every place you'd write a format specifier, look to the word before
   it, and add that as a key value pair.

For instance, consider the following examples (all taken from spots in the
Kubernetes codebase):

- `klog.V(4).Infof("Client is returning errors: code %v, error %v",
  responseCode, err)` becomes `logger.Error(err, "client returned an
  error", "code", responseCode)`

- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v",
  seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after
  response when requesting url", "attempt", retries, "after
  seconds", seconds, "url", url)`

If you *really* must use a format string, use it in a key's value, and
call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to
reflect over type %T")` becomes `logger.Info("unable to reflect over
type", "type", fmt.Sprintf("%T"))`. In general though, the cases where
this is necessary should be few and far between.

#### How do I choose my V-levels?

This is basically the only hard constraint: increase V-levels to denote
more verbose or more debug-y logs.

Otherwise, you can start out with `0` as "you always want to see this",
`1` as "common logging that you might *possibly* want to turn off", and
`10` as "I would like to performance-test your log collection stack."

Then gradually choose levels in between as you need them, working your way
down from 10 (for debug and trace style logs) and up from 1 (for chattier
info-type logs.)

#### How do I choose my keys?

Keys are fairly flexible, and can hold more or less any string
value. For best compatibility with implementations and consistency
with existing code in other projects, there are a few conventions you
should consider.

- Make your keys human-readable.
- Constant keys are generally a good idea.
- Be consistent across your codebase.
- Keys should naturally match parts of the message string.
- Use lower case for simple keys and
  [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for
  more complex ones. Kubernetes is one example of a project that has
  [adopted that
  convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments).

While key names are mostly unrestricted (and spaces are acceptable),
it's generally a good idea to stick to printable ascii characters, or at
least match the general character set of your log lines.
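As a small illustrative sketch (not from the upstream README) of applying these
conventions, the same constant keys reused across related lines; the
identifiers are placeholders:

```
logger.Info("starting controller", "controller", ctrlName, "workers", workerCount)
logger.Info("stopping controller", "controller", ctrlName, "reason", reason)
```
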
#### Why should keys be constant values?

The point of structured logging is to make later log processing easier. Your
keys are, effectively, the schema of each log message. If you use different
keys across instances of the same log line, you will make your structured logs
much harder to use. `Sprintf()` is for values, not for keys!

#### Why is this not a pure interface?

The Logger type is implemented as a struct in order to allow the Go compiler to
optimize things like high-V `Info` logs that are not triggered. Not all of
these implementations are implemented yet, but this structure was suggested as
a way to ensure they *can* be implemented. All of the real work is behind the
`LogSink` interface.

[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging
@ -0,0 +1,54 @@
/*
Copyright 2020 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package logr

// Discard returns a Logger that discards all messages logged to it. It can be
// used whenever the caller is not interested in the logs. Logger instances
// produced by this function always compare as equal.
func Discard() Logger {
	return Logger{
		level: 0,
		sink:  discardLogSink{},
	}
}

// discardLogSink is a LogSink that discards all messages.
type discardLogSink struct{}

// Verify that it actually implements the interface
var _ LogSink = discardLogSink{}

func (l discardLogSink) Init(RuntimeInfo) {
}

func (l discardLogSink) Enabled(int) bool {
	return false
}

func (l discardLogSink) Info(int, string, ...interface{}) {
}

func (l discardLogSink) Error(error, string, ...interface{}) {
}

func (l discardLogSink) WithValues(...interface{}) LogSink {
	return l
}

func (l discardLogSink) WithName(string) LogSink {
	return l
}
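A minimal sketch (not part of the vendored code) of where a discarding logger is handy: passing a `logr.Logger` into code that requires one while dropping all output, for example in tests. The helper name is a placeholder.

```
package example

import "github.com/go-logr/logr"

func runQuietly(run func(logr.Logger) error) error {
	// logr.Discard() satisfies the Logger requirement while discarding
	// every Info and Error call made through it.
	return run(logr.Discard())
}
```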
@ -0,0 +1,787 @@
/*
Copyright 2021 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package funcr implements formatting of structured log messages and
// optionally captures the call site and timestamp.
//
// The simplest way to use it is via its implementation of a
// github.com/go-logr/logr.LogSink with output through an arbitrary
// "write" function. See New and NewJSON for details.
//
// Custom LogSinks
//
// For users who need more control, a funcr.Formatter can be embedded inside
// your own custom LogSink implementation. This is useful when the LogSink
// needs to implement additional methods, for example.
//
// Formatting
//
// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for
// values which are being logged. When rendering a struct, funcr will use Go's
// standard JSON tags (all except "string").
package funcr

import (
	"bytes"
	"encoding"
	"fmt"
	"path/filepath"
	"reflect"
	"runtime"
	"strconv"
	"strings"
	"time"

	"github.com/go-logr/logr"
)

// New returns a logr.Logger which is implemented by an arbitrary function.
func New(fn func(prefix, args string), opts Options) logr.Logger {
	return logr.New(newSink(fn, NewFormatter(opts)))
}

// NewJSON returns a logr.Logger which is implemented by an arbitrary function
// and produces JSON output.
func NewJSON(fn func(obj string), opts Options) logr.Logger {
	fnWrapper := func(_, obj string) {
		fn(obj)
	}
	return logr.New(newSink(fnWrapper, NewFormatterJSON(opts)))
}
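// Illustrative sketch, not part of the vendored funcr.go: wiring funcr into a
// logr.Logger that prints every rendered line with fmt.Println. The Options
// values shown are arbitrary examples.
//
//	logger := funcr.New(
//		func(prefix, args string) { fmt.Println(prefix, args) },
//		funcr.Options{LogCaller: funcr.All, Verbosity: 1},
//	)
//	logger.V(1).Info("request proxied", "status", 200)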
// Underlier exposes access to the underlying logging function. Since
// callers only have a logr.Logger, they have to know which
// implementation is in use, so this interface is less of an
// abstraction and more of a way to test type conversion.
type Underlier interface {
	GetUnderlying() func(prefix, args string)
}

func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink {
	l := &fnlogger{
		Formatter: formatter,
		write:     fn,
	}
	// For skipping fnlogger.Info and fnlogger.Error.
	l.Formatter.AddCallDepth(1)
	return l
}

// Options carries parameters which influence the way logs are generated.
type Options struct {
	// LogCaller tells funcr to add a "caller" key to some or all log lines.
	// This has some overhead, so some users might not want it.
	LogCaller MessageClass

	// LogCallerFunc tells funcr to also log the calling function name. This
	// has no effect if caller logging is not enabled (see Options.LogCaller).
	LogCallerFunc bool

	// LogTimestamp tells funcr to add a "ts" key to log lines. This has some
	// overhead, so some users might not want it.
	LogTimestamp bool

	// TimestampFormat tells funcr how to render timestamps when LogTimestamp
	// is enabled. If not specified, a default format will be used. For more
	// details, see docs for Go's time.Layout.
	TimestampFormat string

	// Verbosity tells funcr which V logs to produce. Higher values enable
	// more logs. Info logs at or below this level will be written, while logs
	// above this level will be discarded.
	Verbosity int

	// RenderBuiltinsHook allows users to mutate the list of key-value pairs
	// while a log line is being rendered. The kvList argument follows logr
	// conventions - each pair of slice elements is comprised of a string key
	// and an arbitrary value (verified and sanitized before calling this
	// hook). The value returned must follow the same conventions. This hook
	// can be used to audit or modify logged data. For example, you might want
	// to prefix all of funcr's built-in keys with some string. This hook is
	// only called for built-in (provided by funcr itself) key-value pairs.
	// Equivalent hooks are offered for key-value pairs saved via
	// logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and
	// for user-provided pairs (see RenderArgsHook).
	RenderBuiltinsHook func(kvList []interface{}) []interface{}

	// RenderValuesHook is the same as RenderBuiltinsHook, except that it is
	// only called for key-value pairs saved via logr.Logger.WithValues. See
	// RenderBuiltinsHook for more details.
	RenderValuesHook func(kvList []interface{}) []interface{}

	// RenderArgsHook is the same as RenderBuiltinsHook, except that it is only
	// called for key-value pairs passed directly to Info and Error. See
	// RenderBuiltinsHook for more details.
	RenderArgsHook func(kvList []interface{}) []interface{}

	// MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct
	// that contains a struct, etc.) it may log. Every time it finds a struct,
	// slice, array, or map the depth is increased by one. When the maximum is
	// reached, the value will be converted to a string indicating that the max
	// depth has been exceeded. If this field is not specified, a default
	// value will be used.
	MaxLogDepth int
}

// MessageClass indicates which category or categories of messages to consider.
type MessageClass int

const (
	// None ignores all message classes.
	None MessageClass = iota
	// All considers all message classes.
	All
	// Info only considers info messages.
	Info
	// Error only considers error messages.
	Error
)

// fnlogger inherits some of its LogSink implementation from Formatter
// and just needs to add some glue code.
type fnlogger struct {
	Formatter
	write func(prefix, args string)
}

func (l fnlogger) WithName(name string) logr.LogSink {
	l.Formatter.AddName(name)
	return &l
}

func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink {
	l.Formatter.AddValues(kvList)
	return &l
}

func (l fnlogger) WithCallDepth(depth int) logr.LogSink {
	l.Formatter.AddCallDepth(depth)
	return &l
}

func (l fnlogger) Info(level int, msg string, kvList ...interface{}) {
	prefix, args := l.FormatInfo(level, msg, kvList)
	l.write(prefix, args)
}

func (l fnlogger) Error(err error, msg string, kvList ...interface{}) {
	prefix, args := l.FormatError(err, msg, kvList)
	l.write(prefix, args)
}

func (l fnlogger) GetUnderlying() func(prefix, args string) {
	return l.write
}

// Assert conformance to the interfaces.
var _ logr.LogSink = &fnlogger{}
var _ logr.CallDepthLogSink = &fnlogger{}
var _ Underlier = &fnlogger{}

// NewFormatter constructs a Formatter which emits a JSON-like key=value format.
func NewFormatter(opts Options) Formatter {
	return newFormatter(opts, outputKeyValue)
}

// NewFormatterJSON constructs a Formatter which emits strict JSON.
func NewFormatterJSON(opts Options) Formatter {
	return newFormatter(opts, outputJSON)
}

// Defaults for Options.
const defaultTimestampFormat = "2006-01-02 15:04:05.000000"
const defaultMaxLogDepth = 16

func newFormatter(opts Options, outfmt outputFormat) Formatter {
	if opts.TimestampFormat == "" {
		opts.TimestampFormat = defaultTimestampFormat
	}
	if opts.MaxLogDepth == 0 {
		opts.MaxLogDepth = defaultMaxLogDepth
	}
	f := Formatter{
		outputFormat: outfmt,
		prefix:       "",
		values:       nil,
		depth:        0,
		opts:         opts,
	}
	return f
}

// Formatter is an opaque struct which can be embedded in a LogSink
// implementation. It should be constructed with NewFormatter. Some of
// its methods directly implement logr.LogSink.
type Formatter struct {
	outputFormat outputFormat
	prefix       string
	values       []interface{}
	valuesStr    string
	depth        int
	opts         Options
}

// outputFormat indicates which outputFormat to use.
type outputFormat int

const (
	// outputKeyValue emits a JSON-like key=value format, but not strict JSON.
	outputKeyValue outputFormat = iota
	// outputJSON emits strict JSON.
	outputJSON
)

// PseudoStruct is a list of key-value pairs that gets logged as a struct.
type PseudoStruct []interface{}

// render produces a log line, ready to use.
func (f Formatter) render(builtins, args []interface{}) string {
	// Empirically bytes.Buffer is faster than strings.Builder for this.
	buf := bytes.NewBuffer(make([]byte, 0, 1024))
	if f.outputFormat == outputJSON {
		buf.WriteByte('{')
	}
	vals := builtins
	if hook := f.opts.RenderBuiltinsHook; hook != nil {
		vals = hook(f.sanitize(vals))
	}
	f.flatten(buf, vals, false, false) // keys are ours, no need to escape
	continuing := len(builtins) > 0
	if len(f.valuesStr) > 0 {
		if continuing {
			if f.outputFormat == outputJSON {
				buf.WriteByte(',')
			} else {
				buf.WriteByte(' ')
			}
		}
		continuing = true
		buf.WriteString(f.valuesStr)
	}
	vals = args
	if hook := f.opts.RenderArgsHook; hook != nil {
		vals = hook(f.sanitize(vals))
	}
	f.flatten(buf, vals, continuing, true) // escape user-provided keys
	if f.outputFormat == outputJSON {
		buf.WriteByte('}')
	}
	return buf.String()
}

// flatten renders a list of key-value pairs into a buffer. If continuing is
// true, it assumes that the buffer has previous values and will emit a
// separator (which depends on the output format) before the first pair it
// writes. If escapeKeys is true, the keys are assumed to have
// non-JSON-compatible characters in them and must be evaluated for escapes.
//
// This function returns a potentially modified version of kvList, which
// ensures that there is a value for every key (adding a value if needed) and
// that each key is a string (substituting a key if needed).
func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} {
	// This logic overlaps with sanitize() but saves one type-cast per key,
	// which can be measurable.
	if len(kvList)%2 != 0 {
		kvList = append(kvList, noValue)
	}
	for i := 0; i < len(kvList); i += 2 {
		k, ok := kvList[i].(string)
		if !ok {
			k = f.nonStringKey(kvList[i])
			kvList[i] = k
		}
		v := kvList[i+1]

		if i > 0 || continuing {
			if f.outputFormat == outputJSON {
				buf.WriteByte(',')
			} else {
				// In theory the format could be something we don't understand. In
				// practice, we control it, so it won't be.
				buf.WriteByte(' ')
			}
		}

		if escapeKeys {
			buf.WriteString(prettyString(k))
		} else {
			// this is faster
			buf.WriteByte('"')
			buf.WriteString(k)
			buf.WriteByte('"')
		}
		if f.outputFormat == outputJSON {
			buf.WriteByte(':')
		} else {
			buf.WriteByte('=')
		}
		buf.WriteString(f.pretty(v))
	}
	return kvList
}

func (f Formatter) pretty(value interface{}) string {
	return f.prettyWithFlags(value, 0, 0)
}

const (
	flagRawStruct = 0x1 // do not print braces on structs
)

// TODO: This is not fast. Most of the overhead goes here.
func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string {
	if depth > f.opts.MaxLogDepth {
		return `"<max-log-depth-exceeded>"`
	}

	// Handle types that take full control of logging.
	if v, ok := value.(logr.Marshaler); ok {
		// Replace the value with what the type wants to get logged.
		// That then gets handled below via reflection.
		value = invokeMarshaler(v)
	}

	// Handle types that want to format themselves.
	switch v := value.(type) {
	case fmt.Stringer:
		value = invokeStringer(v)
	case error:
		value = invokeError(v)
	}

	// Handling the most common types without reflect is a small perf win.
	switch v := value.(type) {
	case bool:
		return strconv.FormatBool(v)
	case string:
		return prettyString(v)
	case int:
		return strconv.FormatInt(int64(v), 10)
	case int8:
		return strconv.FormatInt(int64(v), 10)
	case int16:
		return strconv.FormatInt(int64(v), 10)
	case int32:
		return strconv.FormatInt(int64(v), 10)
	case int64:
		return strconv.FormatInt(int64(v), 10)
	case uint:
		return strconv.FormatUint(uint64(v), 10)
	case uint8:
		return strconv.FormatUint(uint64(v), 10)
	case uint16:
		return strconv.FormatUint(uint64(v), 10)
	case uint32:
		return strconv.FormatUint(uint64(v), 10)
	case uint64:
		return strconv.FormatUint(v, 10)
	case uintptr:
		return strconv.FormatUint(uint64(v), 10)
	case float32:
		return strconv.FormatFloat(float64(v), 'f', -1, 32)
	case float64:
		return strconv.FormatFloat(v, 'f', -1, 64)
	case complex64:
		return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"`
	case complex128:
		return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"`
	case PseudoStruct:
		buf := bytes.NewBuffer(make([]byte, 0, 1024))
		v = f.sanitize(v)
		if flags&flagRawStruct == 0 {
			buf.WriteByte('{')
		}
		for i := 0; i < len(v); i += 2 {
			if i > 0 {
				buf.WriteByte(',')
			}
			k, _ := v[i].(string) // sanitize() above means no need to check success
			// arbitrary keys might need escaping
			buf.WriteString(prettyString(k))
			buf.WriteByte(':')
			buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
		}
		if flags&flagRawStruct == 0 {
			buf.WriteByte('}')
		}
		return buf.String()
	}

	buf := bytes.NewBuffer(make([]byte, 0, 256))
	t := reflect.TypeOf(value)
	if t == nil {
		return "null"
	}
	v := reflect.ValueOf(value)
	switch t.Kind() {
	case reflect.Bool:
		return strconv.FormatBool(v.Bool())
	case reflect.String:
		return prettyString(v.String())
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return strconv.FormatInt(int64(v.Int()), 10)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return strconv.FormatUint(uint64(v.Uint()), 10)
	case reflect.Float32:
		return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32)
	case reflect.Float64:
		return strconv.FormatFloat(v.Float(), 'f', -1, 64)
	case reflect.Complex64:
		return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"`
	case reflect.Complex128:
		return `"` + strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"`
	case reflect.Struct:
		if flags&flagRawStruct == 0 {
			buf.WriteByte('{')
		}
		for i := 0; i < t.NumField(); i++ {
			fld := t.Field(i)
			if fld.PkgPath != "" {
				// reflect says this field is only defined for non-exported fields.
				continue
			}
			if !v.Field(i).CanInterface() {
				// reflect isn't clear exactly what this means, but we can't use it.
				continue
			}
			name := ""
			omitempty := false
			if tag, found := fld.Tag.Lookup("json"); found {
				if tag == "-" {
					continue
				}
				if comma := strings.Index(tag, ","); comma != -1 {
					if n := tag[:comma]; n != "" {
						name = n
					}
					rest := tag[comma:]
					if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") {
						omitempty = true
					}
				} else {
					name = tag
				}
			}
			if omitempty && isEmpty(v.Field(i)) {
				continue
			}
			if i > 0 {
				buf.WriteByte(',')
			}
			if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
				buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1))
				continue
			}
			if name == "" {
				name = fld.Name
			}
			// field names can't contain characters which need escaping
			buf.WriteByte('"')
			buf.WriteString(name)
			buf.WriteByte('"')
			buf.WriteByte(':')
			buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
		}
		if flags&flagRawStruct == 0 {
			buf.WriteByte('}')
		}
		return buf.String()
	case reflect.Slice, reflect.Array:
		buf.WriteByte('[')
		for i := 0; i < v.Len(); i++ {
			if i > 0 {
				buf.WriteByte(',')
			}
			e := v.Index(i)
			buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
		}
		buf.WriteByte(']')
		return buf.String()
	case reflect.Map:
		buf.WriteByte('{')
		// This does not sort the map keys, for best perf.
		it := v.MapRange()
		i := 0
		for it.Next() {
			if i > 0 {
				buf.WriteByte(',')
			}
			// If a map key supports TextMarshaler, use it.
			keystr := ""
			if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok {
				txt, err := m.MarshalText()
				if err != nil {
					keystr = fmt.Sprintf("<error-MarshalText: %s>", err.Error())
				} else {
					keystr = string(txt)
				}
				keystr = prettyString(keystr)
			} else {
				// prettyWithFlags will produce already-escaped values
				keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1)
				if t.Key().Kind() != reflect.String {
					// JSON only does string keys. Unlike Go's standard JSON, we'll
					// convert just about anything to a string.
					keystr = prettyString(keystr)
				}
			}
			buf.WriteString(keystr)
			buf.WriteByte(':')
			buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
			i++
		}
		buf.WriteByte('}')
		return buf.String()
	case reflect.Ptr, reflect.Interface:
		if v.IsNil() {
			return "null"
		}
		return f.prettyWithFlags(v.Elem().Interface(), 0, depth)
	}
	return fmt.Sprintf(`"<unhandled-%s>"`, t.Kind().String())
}

func prettyString(s string) string {
	// Avoid escaping (which does allocations) if we can.
	if needsEscape(s) {
		return strconv.Quote(s)
	}
	b := bytes.NewBuffer(make([]byte, 0, 1024))
	b.WriteByte('"')
	b.WriteString(s)
	b.WriteByte('"')
	return b.String()
}

// needsEscape determines whether the input string needs to be escaped or not,
// without doing any allocations.
func needsEscape(s string) bool {
	for _, r := range s {
		if !strconv.IsPrint(r) || r == '\\' || r == '"' {
			return true
		}
	}
	return false
}

func isEmpty(v reflect.Value) bool {
	switch v.Kind() {
	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
		return v.Len() == 0
	case reflect.Bool:
		return !v.Bool()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return v.Int() == 0
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return v.Uint() == 0
	case reflect.Float32, reflect.Float64:
		return v.Float() == 0
	case reflect.Complex64, reflect.Complex128:
		return v.Complex() == 0
	case reflect.Interface, reflect.Ptr:
		return v.IsNil()
	}
	return false
}

func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
	defer func() {
		if r := recover(); r != nil {
			ret = fmt.Sprintf("<panic: %s>", r)
		}
	}()
	return m.MarshalLog()
}

func invokeStringer(s fmt.Stringer) (ret string) {
	defer func() {
		if r := recover(); r != nil {
			ret = fmt.Sprintf("<panic: %s>", r)
		}
	}()
	return s.String()
}

func invokeError(e error) (ret string) {
	defer func() {
		if r := recover(); r != nil {
			ret = fmt.Sprintf("<panic: %s>", r)
		}
	}()
	return e.Error()
}

// Caller represents the original call site for a log line, after considering
// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
// Line fields will always be provided, while the Func field is optional.
// Users can set the render hook fields in Options to examine logged key-value
// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
// field is enabled for the given MessageClass.
type Caller struct {
	// File is the basename of the file for this call site.
	File string `json:"file"`
	// Line is the line number in the file for this call site.
	Line int `json:"line"`
	// Func is the function name for this call site, or empty if
	// Options.LogCallerFunc is not enabled.
	Func string `json:"function,omitempty"`
}

func (f Formatter) caller() Caller {
	// +1 for this frame, +1 for Info/Error.
	pc, file, line, ok := runtime.Caller(f.depth + 2)
	if !ok {
		return Caller{"<unknown>", 0, ""}
	}
	fn := ""
	if f.opts.LogCallerFunc {
		if fp := runtime.FuncForPC(pc); fp != nil {
			fn = fp.Name()
		}
	}

	return Caller{filepath.Base(file), line, fn}
}

const noValue = "<no-value>"

func (f Formatter) nonStringKey(v interface{}) string {
	return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
}

// snippet produces a short snippet string of an arbitrary value.
func (f Formatter) snippet(v interface{}) string {
	const snipLen = 16

	snip := f.pretty(v)
	if len(snip) > snipLen {
		snip = snip[:snipLen]
|
||||||
|
}
|
||||||
|
return snip
|
||||||
|
}
|
||||||
|
|
||||||
|
// sanitize ensures that a list of key-value pairs has a value for every key
|
||||||
|
// (adding a value if needed) and that each key is a string (substituting a key
|
||||||
|
// if needed).
|
||||||
|
func (f Formatter) sanitize(kvList []interface{}) []interface{} {
|
||||||
|
if len(kvList)%2 != 0 {
|
||||||
|
kvList = append(kvList, noValue)
|
||||||
|
}
|
||||||
|
for i := 0; i < len(kvList); i += 2 {
|
||||||
|
_, ok := kvList[i].(string)
|
||||||
|
if !ok {
|
||||||
|
kvList[i] = f.nonStringKey(kvList[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return kvList
|
||||||
|
}
|
||||||
|
|
||||||
|
// Init configures this Formatter from runtime info, such as the call depth
|
||||||
|
// imposed by logr itself.
|
||||||
|
// Note that this receiver is a pointer, so depth can be saved.
|
||||||
|
func (f *Formatter) Init(info logr.RuntimeInfo) {
|
||||||
|
f.depth += info.CallDepth
|
||||||
|
}
|
||||||
|
|
||||||
|
// Enabled checks whether an info message at the given level should be logged.
|
||||||
|
func (f Formatter) Enabled(level int) bool {
|
||||||
|
return level <= f.opts.Verbosity
|
||||||
|
}
|
||||||
|
|
||||||
|
// GetDepth returns the current depth of this Formatter. This is useful for
|
||||||
|
// implementations which do their own caller attribution.
|
||||||
|
func (f Formatter) GetDepth() int {
|
||||||
|
return f.depth
|
||||||
|
}
|
||||||
|
|
||||||
|
// FormatInfo renders an Info log message into strings. The prefix will be
|
||||||
|
// empty when no names were set (via AddNames), or when the output is
|
||||||
|
// configured for JSON.
|
||||||
|
func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
|
||||||
|
args := make([]interface{}, 0, 64) // using a constant here impacts perf
|
||||||
|
prefix = f.prefix
|
||||||
|
if f.outputFormat == outputJSON {
|
||||||
|
args = append(args, "logger", prefix)
|
||||||
|
prefix = ""
|
||||||
|
}
|
||||||
|
if f.opts.LogTimestamp {
|
||||||
|
args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
|
||||||
|
}
|
||||||
|
if policy := f.opts.LogCaller; policy == All || policy == Info {
|
||||||
|
args = append(args, "caller", f.caller())
|
||||||
|
}
|
||||||
|
args = append(args, "level", level, "msg", msg)
|
||||||
|
return prefix, f.render(args, kvList)
|
||||||
|
}
|
||||||
|
|
||||||
|
// FormatError renders an Error log message into strings. The prefix will be
|
||||||
|
// empty when no names were set (via AddNames), or when the output is
|
||||||
|
// configured for JSON.
|
||||||
|
func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
|
||||||
|
args := make([]interface{}, 0, 64) // using a constant here impacts perf
|
||||||
|
prefix = f.prefix
|
||||||
|
if f.outputFormat == outputJSON {
|
||||||
|
args = append(args, "logger", prefix)
|
||||||
|
prefix = ""
|
||||||
|
}
|
||||||
|
if f.opts.LogTimestamp {
|
||||||
|
args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
|
||||||
|
}
|
||||||
|
if policy := f.opts.LogCaller; policy == All || policy == Error {
|
||||||
|
args = append(args, "caller", f.caller())
|
||||||
|
}
|
||||||
|
args = append(args, "msg", msg)
|
||||||
|
var loggableErr interface{}
|
||||||
|
if err != nil {
|
||||||
|
loggableErr = err.Error()
|
||||||
|
}
|
||||||
|
args = append(args, "error", loggableErr)
|
||||||
|
return f.prefix, f.render(args, kvList)
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddName appends the specified name. funcr uses '/' characters to separate
|
||||||
|
// name elements. Callers should not pass '/' in the provided name string, but
|
||||||
|
// this library does not actually enforce that.
|
||||||
|
func (f *Formatter) AddName(name string) {
|
||||||
|
if len(f.prefix) > 0 {
|
||||||
|
f.prefix += "/"
|
||||||
|
}
|
||||||
|
f.prefix += name
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddValues adds key-value pairs to the set of saved values to be logged with
|
||||||
|
// each log line.
|
||||||
|
func (f *Formatter) AddValues(kvList []interface{}) {
|
||||||
|
// Three slice args forces a copy.
|
||||||
|
n := len(f.values)
|
||||||
|
f.values = append(f.values[:n:n], kvList...)
|
||||||
|
|
||||||
|
vals := f.values
|
||||||
|
if hook := f.opts.RenderValuesHook; hook != nil {
|
||||||
|
vals = hook(f.sanitize(vals))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Pre-render values, so we don't have to do it on each Info/Error call.
|
||||||
|
buf := bytes.NewBuffer(make([]byte, 0, 1024))
|
||||||
|
f.flatten(buf, vals, false, true) // escape user-provided keys
|
||||||
|
f.valuesStr = buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// AddCallDepth increases the number of stack-frames to skip when attributing
|
||||||
|
// the log line to a file and line.
|
||||||
|
func (f *Formatter) AddCallDepth(depth int) {
|
||||||
|
f.depth += depth
|
||||||
|
}
|
|
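
The Formatter above is the engine that funcr-based sinks use to turn key/value pairs into a single rendered line. As a rough illustration of the output shape it produces, here is a hedged sketch (not part of the vendored file) using funcr.New from the same package; the sample struct and field names are invented for the example, and the rendered bytes are only an approximation.

package main

import (
	"github.com/go-logr/logr/funcr"
)

// user is an invented type; funcr renders structs as JSON-ish objects via
// prettyWithFlags, honoring `json` tags and omitempty as shown above.
type user struct {
	Name  string `json:"name"`
	Email string `json:"email,omitempty"`
}

func main() {
	// funcr.New takes a func(prefix, args string) and writes nothing itself;
	// the callback decides where the rendered line goes.
	log := funcr.New(func(prefix, args string) {
		println(prefix, args)
	}, funcr.Options{Verbosity: 1})

	log = log.WithName("example").WithValues("request", 42)
	// Expected shape (not exact bytes):
	//   example "level"=0 "msg"="created" "request"=42 "user"={"name":"ada"}
	// Email is dropped because of omitempty plus the isEmpty check above.
	log.Info("created", "user", user{Name: "ada"})
}
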
@ -0,0 +1,510 @@
/*
Copyright 2019 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// This design derives from Dave Cheney's blog:
//     http://dave.cheney.net/2015/11/05/lets-talk-about-logging

// Package logr defines a general-purpose logging API and abstract interfaces
// to back that API. Packages in the Go ecosystem can depend on this package,
// while callers can implement logging with whatever backend is appropriate.
//
// Usage
//
// Logging is done using a Logger instance. Logger is a concrete type with
// methods, which defers the actual logging to a LogSink interface. The main
// methods of Logger are Info() and Error(). Arguments to Info() and Error()
// are key/value pairs rather than printf-style formatted strings, emphasizing
// "structured logging".
//
// With Go's standard log package, we might write:
//  log.Printf("setting target value %s", targetValue)
//
// With logr's structured logging, we'd write:
//  logger.Info("setting target", "value", targetValue)
//
// Errors are much the same. Instead of:
//  log.Printf("failed to open the pod bay door for user %s: %v", user, err)
//
// We'd write:
//  logger.Error(err, "failed to open the pod bay door", "user", user)
//
// Info() and Error() are very similar, but they are separate methods so that
// LogSink implementations can choose to do things like attach additional
// information (such as stack traces) on calls to Error(). Error() messages are
// always logged, regardless of the current verbosity. If there is no error
// instance available, passing nil is valid.
//
// Verbosity
//
// Often we want to log information only when the application in "verbose
// mode". To write log lines that are more verbose, Logger has a V() method.
// The higher the V-level of a log line, the less critical it is considered.
// Log-lines with V-levels that are not enabled (as per the LogSink) will not
// be written. Level V(0) is the default, and logger.V(0).Info() has the same
// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
// Error messages do not have a verbosity level and are always logged.
//
// Where we might have written:
//  if flVerbose >= 2 {
//      log.Printf("an unusual thing happened")
//  }
//
// We can write:
//  logger.V(2).Info("an unusual thing happened")
//
// Logger Names
//
// Logger instances can have name strings so that all messages logged through
// that instance have additional context. For example, you might want to add
// a subsystem name:
//
//  logger.WithName("compactor").Info("started", "time", time.Now())
//
// The WithName() method returns a new Logger, which can be passed to
// constructors or other functions for further use. Repeated use of WithName()
// will accumulate name "segments". These name segments will be joined in some
// way by the LogSink implementation. It is strongly recommended that name
// segments contain simple identifiers (letters, digits, and hyphen), and do
// not contain characters that could muddle the log output or confuse the
// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
// quotes, etc).
//
// Saved Values
//
// Logger instances can store any number of key/value pairs, which will be
// logged alongside all messages logged through that instance. For example,
// you might want to create a Logger instance per managed object:
//
// With the standard log package, we might write:
//  log.Printf("decided to set field foo to value %q for object %s/%s",
//      targetValue, object.Namespace, object.Name)
//
// With logr we'd write:
//  // Elsewhere: set up the logger to log the object name.
//  obj.logger = mainLogger.WithValues(
//      "name", obj.name, "namespace", obj.namespace)
//
//  // later on...
//  obj.logger.Info("setting foo", "value", targetValue)
//
// Best Practices
//
// Logger has very few hard rules, with the goal that LogSink implementations
// might have a lot of freedom to differentiate. There are, however, some
// things to consider.
//
// The log message consists of a constant message attached to the log line.
// This should generally be a simple description of what's occurring, and should
// never be a format string. Variable information can then be attached using
// named values.
//
// Keys are arbitrary strings, but should generally be constant values. Values
// may be any Go value, but how the value is formatted is determined by the
// LogSink implementation.
//
// Logger instances are meant to be passed around by value. Code that receives
// such a value can call its methods without having to check whether the
// instance is ready for use.
//
// Calling methods with the null logger (Logger{}) as instance will crash
// because it has no LogSink. Therefore this null logger should never be passed
// around. For cases where passing a logger is optional, a pointer to Logger
// should be used.
//
// Key Naming Conventions
//
// Keys are not strictly required to conform to any specification or regex, but
// it is recommended that they:
//   * be human-readable and meaningful (not auto-generated or simple ordinals)
//   * be constant (not dependent on input data)
//   * contain only printable characters
//   * not contain whitespace or punctuation
//   * use lower case for simple keys and lowerCamelCase for more complex ones
//
// These guidelines help ensure that log data is processed properly regardless
// of the log implementation. For example, log implementations will try to
// output JSON data or will store data for later database (e.g. SQL) queries.
//
// While users are generally free to use key names of their choice, it's
// generally best to avoid using the following keys, as they're frequently used
// by implementations:
//   * "caller": the calling information (file/line) of a particular log line
//   * "error": the underlying error value in the `Error` method
//   * "level": the log level
//   * "logger": the name of the associated logger
//   * "msg": the log message
//   * "stacktrace": the stack trace associated with a particular log line or
//                   error (often from the `Error` message)
//   * "ts": the timestamp for a log line
//
// Implementations are encouraged to make use of these keys to represent the
// above concepts, when necessary (for example, in a pure-JSON output form, it
// would be necessary to represent at least message and timestamp as ordinary
// named values).
//
// Break Glass
//
// Implementations may choose to give callers access to the underlying
// logging implementation. The recommended pattern for this is:
//  // Underlier exposes access to the underlying logging implementation.
//  // Since callers only have a logr.Logger, they have to know which
//  // implementation is in use, so this interface is less of an abstraction
//  // and more of way to test type conversion.
//  type Underlier interface {
//      GetUnderlying() <underlying-type>
//  }
//
// Logger grants access to the sink to enable type assertions like this:
//  func DoSomethingWithImpl(log logr.Logger) {
//      if underlier, ok := log.GetSink()(impl.Underlier) {
//          implLogger := underlier.GetUnderlying()
//          ...
//      }
//  }
//
// Custom `With*` functions can be implemented by copying the complete
// Logger struct and replacing the sink in the copy:
//  // WithFooBar changes the foobar parameter in the log sink and returns a
//  // new logger with that modified sink. It does nothing for loggers where
//  // the sink doesn't support that parameter.
//  func WithFoobar(log logr.Logger, foobar int) logr.Logger {
//      if foobarLogSink, ok := log.GetSink()(FoobarSink); ok {
//          log = log.WithSink(foobarLogSink.WithFooBar(foobar))
//      }
//      return log
//  }
//
// Don't use New to construct a new Logger with a LogSink retrieved from an
// existing Logger. Source code attribution might not work correctly and
// unexported fields in Logger get lost.
//
// Beware that the same LogSink instance may be shared by different logger
// instances. Calling functions that modify the LogSink will affect all of
// those.
package logr

import (
	"context"
)
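
Since the package documentation above is all prose, here is a compact, hedged sketch (not part of the vendored file) of the API it describes. It uses the stdr sink, which appears later in this diff, as the backend; any LogSink constructor would work the same way.

package main

import (
	"errors"

	"github.com/go-logr/stdr"
)

func main() {
	// Passing nil lets stdr fall back to a default standard-library logger.
	logger := stdr.New(nil)

	// Structured logging: a constant message plus alternating key/value pairs.
	logger = logger.WithName("compactor").WithValues("namespace", "default")
	logger.Info("started", "objects", 17)

	// V-levels are additive and only emitted when the sink enables them.
	logger.V(2).Info("an unusual thing happened")

	// Errors are always logged, regardless of verbosity.
	logger.Error(errors.New("door stuck"), "failed to open the pod bay door", "user", "dave")
}
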

// New returns a new Logger instance. This is primarily used by libraries
// implementing LogSink, rather than end users.
func New(sink LogSink) Logger {
	logger := Logger{}
	logger.setSink(sink)
	sink.Init(runtimeInfo)
	return logger
}

// setSink stores the sink and updates any related fields. It mutates the
// logger and thus is only safe to use for loggers that are not currently being
// used concurrently.
func (l *Logger) setSink(sink LogSink) {
	l.sink = sink
}

// GetSink returns the stored sink.
func (l Logger) GetSink() LogSink {
	return l.sink
}

// WithSink returns a copy of the logger with the new sink.
func (l Logger) WithSink(sink LogSink) Logger {
	l.setSink(sink)
	return l
}

// Logger is an interface to an abstract logging implementation. This is a
// concrete type for performance reasons, but all the real work is passed on to
// a LogSink. Implementations of LogSink should provide their own constructors
// that return Logger, not LogSink.
//
// The underlying sink can be accessed through GetSink and be modified through
// WithSink. This enables the implementation of custom extensions (see "Break
// Glass" in the package documentation). Normally the sink should be used only
// indirectly.
type Logger struct {
	sink  LogSink
	level int
}

// Enabled tests whether this Logger is enabled. For example, commandline
// flags might be used to set the logging verbosity and disable some info logs.
func (l Logger) Enabled() bool {
	return l.sink.Enabled(l.level)
}

// Info logs a non-error message with the given key/value pairs as context.
//
// The msg argument should be used to add some constant description to the log
// line. The key/value pairs can then be used to add additional variable
// information. The key/value pairs must alternate string keys and arbitrary
// values.
func (l Logger) Info(msg string, keysAndValues ...interface{}) {
	if l.Enabled() {
		if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
			withHelper.GetCallStackHelper()()
		}
		l.sink.Info(l.level, msg, keysAndValues...)
	}
}

// Error logs an error, with the given message and key/value pairs as context.
// It functions similarly to Info, but may have unique behavior, and should be
// preferred for logging errors (see the package documentations for more
// information). The log message will always be emitted, regardless of
// verbosity level.
//
// The msg argument should be used to add context to any underlying error,
// while the err argument should be used to attach the actual error that
// triggered this log line, if present. The err parameter is optional
// and nil may be passed instead of an error instance.
func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
	if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
		withHelper.GetCallStackHelper()()
	}
	l.sink.Error(err, msg, keysAndValues...)
}

// V returns a new Logger instance for a specific verbosity level, relative to
// this Logger. In other words, V-levels are additive. A higher verbosity
// level means a log message is less important. Negative V-levels are treated
// as 0.
func (l Logger) V(level int) Logger {
	if level < 0 {
		level = 0
	}
	l.level += level
	return l
}

// WithValues returns a new Logger instance with additional key/value pairs.
// See Info for documentation on how key/value pairs work.
func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
	l.setSink(l.sink.WithValues(keysAndValues...))
	return l
}

// WithName returns a new Logger instance with the specified name element added
// to the Logger's name. Successive calls with WithName append additional
// suffixes to the Logger's name. It's strongly recommended that name segments
// contain only letters, digits, and hyphens (see the package documentation for
// more information).
func (l Logger) WithName(name string) Logger {
	l.setSink(l.sink.WithName(name))
	return l
}

// WithCallDepth returns a Logger instance that offsets the call stack by the
// specified number of frames when logging call site information, if possible.
// This is useful for users who have helper functions between the "real" call
// site and the actual calls to Logger methods. If depth is 0 the attribution
// should be to the direct caller of this function. If depth is 1 the
// attribution should skip 1 call frame, and so on. Successive calls to this
// are additive.
//
// If the underlying log implementation supports a WithCallDepth(int) method,
// it will be called and the result returned. If the implementation does not
// support CallDepthLogSink, the original Logger will be returned.
//
// To skip one level, WithCallStackHelper() should be used instead of
// WithCallDepth(1) because it works with implementions that support the
// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
func (l Logger) WithCallDepth(depth int) Logger {
	if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
		l.setSink(withCallDepth.WithCallDepth(depth))
	}
	return l
}

// WithCallStackHelper returns a new Logger instance that skips the direct
// caller when logging call site information, if possible. This is useful for
// users who have helper functions between the "real" call site and the actual
// calls to Logger methods and want to support loggers which depend on marking
// each individual helper function, like loggers based on testing.T.
//
// In addition to using that new logger instance, callers also must call the
// returned function.
//
// If the underlying log implementation supports a WithCallDepth(int) method,
// WithCallDepth(1) will be called to produce a new logger. If it supports a
// WithCallStackHelper() method, that will be also called. If the
// implementation does not support either of these, the original Logger will be
// returned.
func (l Logger) WithCallStackHelper() (func(), Logger) {
	var helper func()
	if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
		l.setSink(withCallDepth.WithCallDepth(1))
	}
	if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
		helper = withHelper.GetCallStackHelper()
	} else {
		helper = func() {}
	}
	return helper, l
}
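
Because WithCallStackHelper returns both a helper-marking function and a new Logger, and both must be used, a short sketch of the intended calling pattern may help (this is an editor's illustration, not part of the vendored file; the function name is invented).

package helpers

import "github.com/go-logr/logr"

// logAndCount is a hypothetical helper sitting between real call sites and
// logr. Calling helper() in this frame and logging through the returned
// logger keeps call-site attribution pointing at logAndCount's caller,
// not at this function.
func logAndCount(log logr.Logger, msg string, n *int) {
	helper, log := log.WithCallStackHelper()
	helper() // mark this frame as a helper so attribution skips it
	*n++
	log.Info(msg, "count", *n)
}
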

// contextKey is how we find Loggers in a context.Context.
type contextKey struct{}

// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
		return v, nil
	}

	return Logger{}, notFoundError{}
}

// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}

func (notFoundError) Error() string {
	return "no logr.Logger was present"
}

func (notFoundError) IsNotFound() bool {
	return true
}

// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
	if v, ok := ctx.Value(contextKey{}).(Logger); ok {
		return v
	}

	return Discard()
}

// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
	return context.WithValue(ctx, contextKey{}, logger)
}
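
A minimal sketch of threading a Logger through a context.Context with only the functions defined above (editor's illustration, not part of the vendored file; the handler names are invented).

package ctxlog

import (
	"context"

	"github.com/go-logr/logr"
)

// handleRequest is a hypothetical callee that does not take a Logger
// parameter; it recovers one from the context, or a no-op logger if none
// was attached.
func handleRequest(ctx context.Context, id string) {
	log := logr.FromContextOrDiscard(ctx)
	log.Info("handling request", "id", id)
}

// serve attaches a Logger to the context before calling down the stack.
func serve(base logr.Logger) {
	ctx := logr.NewContext(context.Background(), base.WithName("http"))
	handleRequest(ctx, "req-1")
}
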

// RuntimeInfo holds information that the logr "core" library knows which
// LogSinks might want to know.
type RuntimeInfo struct {
	// CallDepth is the number of call frames the logr library adds between the
	// end-user and the LogSink. LogSink implementations which choose to print
	// the original logging site (e.g. file & line) should climb this many
	// additional frames to find it.
	CallDepth int
}

// runtimeInfo is a static global. It must not be changed at run time.
var runtimeInfo = RuntimeInfo{
	CallDepth: 1,
}

// LogSink represents a logging implementation. End-users will generally not
// interact with this type.
type LogSink interface {
	// Init receives optional information about the logr library for LogSink
	// implementations that need it.
	Init(info RuntimeInfo)

	// Enabled tests whether this LogSink is enabled at the specified V-level.
	// For example, commandline flags might be used to set the logging
	// verbosity and disable some info logs.
	Enabled(level int) bool

	// Info logs a non-error message with the given key/value pairs as context.
	// The level argument is provided for optional logging. This method will
	// only be called when Enabled(level) is true. See Logger.Info for more
	// details.
	Info(level int, msg string, keysAndValues ...interface{})

	// Error logs an error, with the given message and key/value pairs as
	// context. See Logger.Error for more details.
	Error(err error, msg string, keysAndValues ...interface{})

	// WithValues returns a new LogSink with additional key/value pairs. See
	// Logger.WithValues for more details.
	WithValues(keysAndValues ...interface{}) LogSink

	// WithName returns a new LogSink with the specified name appended. See
	// Logger.WithName for more details.
	WithName(name string) LogSink
}
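
To make the LogSink contract concrete, here is a hedged sketch of about the smallest useful implementation, a sink that counts and discards lines (editor's illustration, not part of the vendored file). Real sinks such as funcr and stdr, both elsewhere in this diff, do the formatting work.

package countsink

import "github.com/go-logr/logr"

// countSink is a hypothetical LogSink that discards messages but counts them.
// WithValues and WithName deliberately ignore their arguments.
type countSink struct {
	depth int // from RuntimeInfo; stored for illustration, not used further
	calls int
}

func (s *countSink) Init(info logr.RuntimeInfo)                      { s.depth = info.CallDepth }
func (s *countSink) Enabled(level int) bool                          { return true }
func (s *countSink) Info(level int, msg string, kvs ...interface{})  { s.calls++ }
func (s *countSink) Error(err error, msg string, kvs ...interface{}) { s.calls++ }
func (s *countSink) WithValues(kvs ...interface{}) logr.LogSink      { return s }
func (s *countSink) WithName(name string) logr.LogSink               { return s }

// New returns a Logger backed by countSink, built via logr.New as the
// LogSink documentation recommends.
func New() logr.Logger {
	return logr.New(&countSink{})
}
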

// CallDepthLogSink represents a Logger that knows how to climb the call stack
// to identify the original call site and can offset the depth by a specified
// number of frames. This is useful for users who have helper functions
// between the "real" call site and the actual calls to Logger methods.
// Implementations that log information about the call site (such as file,
// function, or line) would otherwise log information about the intermediate
// helper functions.
//
// This is an optional interface and implementations are not required to
// support it.
type CallDepthLogSink interface {
	// WithCallDepth returns a LogSink that will offset the call
	// stack by the specified number of frames when logging call
	// site information.
	//
	// If depth is 0, the LogSink should skip exactly the number
	// of call frames defined in RuntimeInfo.CallDepth when Info
	// or Error are called, i.e. the attribution should be to the
	// direct caller of Logger.Info or Logger.Error.
	//
	// If depth is 1 the attribution should skip 1 call frame, and so on.
	// Successive calls to this are additive.
	WithCallDepth(depth int) LogSink
}

// CallStackHelperLogSink represents a Logger that knows how to climb
// the call stack to identify the original call site and can skip
// intermediate helper functions if they mark themselves as
// helper. Go's testing package uses that approach.
//
// This is useful for users who have helper functions between the
// "real" call site and the actual calls to Logger methods.
// Implementations that log information about the call site (such as
// file, function, or line) would otherwise log information about the
// intermediate helper functions.
//
// This is an optional interface and implementations are not required
// to support it. Implementations that choose to support this must not
// simply implement it as WithCallDepth(1), because
// Logger.WithCallStackHelper will call both methods if they are
// present. This should only be implemented for LogSinks that actually
// need it, as with testing.T.
type CallStackHelperLogSink interface {
	// GetCallStackHelper returns a function that must be called
	// to mark the direct caller as helper function when logging
	// call site information.
	GetCallStackHelper() func()
}

// Marshaler is an optional interface that logged values may choose to
// implement. Loggers with structured output, such as JSON, should
// log the object return by the MarshalLog method instead of the
// original value.
type Marshaler interface {
	// MarshalLog can be used to:
	//   - ensure that structs are not logged as strings when the original
	//     value has a String method: return a different type without a
	//     String method
	//   - select which fields of a complex type should get logged:
	//     return a simpler struct with fewer fields
	//   - log unexported fields: return a different struct
	//     with exported fields
	//
	// It may return any value of any type.
	MarshalLog() interface{}
}
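
The Marshaler hook is easiest to see with a type that would otherwise log poorly. The following is a hedged sketch, not part of the vendored file; the token type is invented for the example.

package redact

import "github.com/go-logr/logr"

// token is a hypothetical secret-bearing type.
type token struct {
	ID     string
	Secret string
}

// MarshalLog lets structured sinks log a safe, simplified view instead of the
// raw struct: the secret is dropped and only loggable data remains.
func (t token) MarshalLog() interface{} {
	return struct{ ID string }{ID: t.ID}
}

// Compile-time check that token satisfies logr.Marshaler.
var _ logr.Marshaler = token{}
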
@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
@ -0,0 +1,6 @@
# Minimal Go logging using logr and Go's standard library

[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr)

This package implements the [logr interface](https://github.com/go-logr/logr)
in terms of Go's standard log package (https://pkg.go.dev/log).
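
The README is terse, so a hedged usage sketch may help; it relies only on the stdr functions defined in the file that follows and is not part of the vendored package itself.

package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	// Route logr output through a standard *log.Logger.
	logger := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
	logger = logger.WithName("demo")

	// Info lines at V-levels above the global verbosity are dropped.
	stdr.SetVerbosity(1)
	logger.V(1).Info("enabled at verbosity 1")
	logger.V(2).Info("suppressed at verbosity 1")
}
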
@ -0,0 +1,170 @@
/*
Copyright 2019 The logr Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Package stdr implements github.com/go-logr/logr.Logger in terms of
// Go's standard log package.
package stdr

import (
	"log"
	"os"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

// The global verbosity level. See SetVerbosity().
var globalVerbosity int

// SetVerbosity sets the global level against which all info logs will be
// compared. If this is greater than or equal to the "V" of the logger, the
// message will be logged. A higher value here means more logs will be written.
// The previous verbosity value is returned. This is not concurrent-safe -
// callers must be sure to call it from only one goroutine.
func SetVerbosity(v int) int {
	old := globalVerbosity
	globalVerbosity = v
	return old
}

// New returns a logr.Logger which is implemented by Go's standard log package,
// or something like it. If std is nil, this will use a default logger
// instead.
//
// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile)))
func New(std StdLogger) logr.Logger {
	return NewWithOptions(std, Options{})
}

// NewWithOptions returns a logr.Logger which is implemented by Go's standard
// log package, or something like it. See New for details.
func NewWithOptions(std StdLogger, opts Options) logr.Logger {
	if std == nil {
		// Go's log.Default() is only available in 1.16 and higher.
		std = log.New(os.Stderr, "", log.LstdFlags)
	}

	if opts.Depth < 0 {
		opts.Depth = 0
	}

	fopts := funcr.Options{
		LogCaller: funcr.MessageClass(opts.LogCaller),
	}

	sl := &logger{
		Formatter: funcr.NewFormatter(fopts),
		std:       std,
	}

	// For skipping our own logger.Info/Error.
	sl.Formatter.AddCallDepth(1 + opts.Depth)

	return logr.New(sl)
}

// Options carries parameters which influence the way logs are generated.
type Options struct {
	// Depth biases the assumed number of call frames to the "true" caller.
	// This is useful when the calling code calls a function which then calls
	// stdr (e.g. a logging shim to another API). Values less than zero will
	// be treated as zero.
	Depth int

	// LogCaller tells stdr to add a "caller" key to some or all log lines.
	// Go's log package has options to log this natively, too.
	LogCaller MessageClass

	// TODO: add an option to log the date/time
}

// MessageClass indicates which category or categories of messages to consider.
type MessageClass int

const (
	// None ignores all message classes.
	None MessageClass = iota
	// All considers all message classes.
	All
	// Info only considers info messages.
	Info
	// Error only considers error messages.
	Error
)

// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
// this adapter.
type StdLogger interface {
	// Output is the same as log.Output and log.Logger.Output.
	Output(calldepth int, logline string) error
}

type logger struct {
	funcr.Formatter
	std StdLogger
}

var _ logr.LogSink = &logger{}
var _ logr.CallDepthLogSink = &logger{}

func (l logger) Enabled(level int) bool {
	return globalVerbosity >= level
}

func (l logger) Info(level int, msg string, kvList ...interface{}) {
	prefix, args := l.FormatInfo(level, msg, kvList)
	if prefix != "" {
		args = prefix + ": " + args
	}
	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
}

func (l logger) Error(err error, msg string, kvList ...interface{}) {
	prefix, args := l.FormatError(err, msg, kvList)
	if prefix != "" {
		args = prefix + ": " + args
	}
	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
}

func (l logger) WithName(name string) logr.LogSink {
	l.Formatter.AddName(name)
	return &l
}

func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
	l.Formatter.AddValues(kvList)
	return &l
}

func (l logger) WithCallDepth(depth int) logr.LogSink {
	l.Formatter.AddCallDepth(depth)
	return &l
}

// Underlier exposes access to the underlying logging implementation. Since
// callers only have a logr.Logger, they have to know which implementation is
// in use, so this interface is less of an abstraction and more of way to test
// type conversion.
type Underlier interface {
	GetUnderlying() StdLogger
}

// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
// is itself an interface, the result may or may not be a Go log.Logger.
func (l logger) GetUnderlying() StdLogger {
	return l.std
}
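
GetUnderlying is the "break glass" hook described in the logr package docs earlier in this diff. A short, hedged sketch of reaching the standard-library logger follows (editor's illustration, not part of the vendored file; the helper name is invented).

package main

import (
	"log"

	"github.com/go-logr/logr"
	"github.com/go-logr/stdr"
)

// setStdFlags reaches through a logr.Logger to the underlying StdLogger, if
// the sink is an stdr sink, and adjusts its flags. Other sinks are left alone.
func setStdFlags(l logr.Logger, flags int) {
	if u, ok := l.GetSink().(stdr.Underlier); ok {
		if std, ok := u.GetUnderlying().(*log.Logger); ok {
			std.SetFlags(flags)
		}
	}
}

func main() {
	logger := stdr.New(log.New(log.Writer(), "", log.LstdFlags))
	setStdFlags(logger, log.LstdFlags|log.Lmicroseconds)
	logger.Info("flags adjusted")
}
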
@ -0,0 +1,27 @@
Copyright (c) 2015, Gengo, Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

    * Redistributions of source code must retain the above copyright notice,
      this list of conditions and the following disclaimer.

    * Redistributions in binary form must reproduce the above copyright notice,
      this list of conditions and the following disclaimer in the documentation
      and/or other materials provided with the distribution.

    * Neither the name of Gengo, Inc. nor the names of its
      contributors may be used to endorse or promote products derived from this
      software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
35  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/BUILD.bazel  (generated, vendored, new file)
@ -0,0 +1,35 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(default_visibility = ["//visibility:public"])

go_library(
    name = "httprule",
    srcs = [
        "compile.go",
        "parse.go",
        "types.go",
    ],
    importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule",
    deps = ["//utilities"],
)

go_test(
    name = "httprule_test",
    size = "small",
    srcs = [
        "compile_test.go",
        "parse_test.go",
        "types_test.go",
    ],
    embed = [":httprule"],
    deps = [
        "//utilities",
        "@com_github_golang_glog//:glog",
    ],
)

alias(
    name = "go_default_library",
    actual = ":httprule",
    visibility = ["//:__subpackages__"],
)
121  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/compile.go  (generated, vendored, new file)
@ -0,0 +1,121 @@
|
||||||
|
package httprule
|
||||||
|
|
||||||
|
import (
|
||||||
|
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
opcodeVersion = 1
|
||||||
|
)
|
||||||
|
|
||||||
|
// Template is a compiled representation of path templates.
|
||||||
|
type Template struct {
|
||||||
|
// Version is the version number of the format.
|
||||||
|
Version int
|
||||||
|
// OpCodes is a sequence of operations.
|
||||||
|
OpCodes []int
|
||||||
|
// Pool is a constant pool
|
||||||
|
Pool []string
|
||||||
|
// Verb is a VERB part in the template.
|
||||||
|
Verb string
|
||||||
|
// Fields is a list of field paths bound in this template.
|
||||||
|
Fields []string
|
||||||
|
// Original template (example: /v1/a_bit_of_everything)
|
||||||
|
Template string
|
||||||
|
}
|
||||||
|
|
||||||
|
// Compiler compiles utilities representation of path templates into marshallable operations.
|
||||||
|
// They can be unmarshalled by runtime.NewPattern.
|
||||||
|
type Compiler interface {
|
||||||
|
Compile() Template
|
||||||
|
}
|
||||||
|
|
||||||
|
type op struct {
|
||||||
|
// code is the opcode of the operation
|
||||||
|
code utilities.OpCode
|
||||||
|
|
||||||
|
// str is a string operand of the code.
|
||||||
|
// num is ignored if str is not empty.
|
||||||
|
str string
|
||||||
|
|
||||||
|
// num is a numeric operand of the code.
|
||||||
|
num int
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w wildcard) compile() []op {
|
||||||
|
return []op{
|
||||||
|
{code: utilities.OpPush},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (w deepWildcard) compile() []op {
|
||||||
|
return []op{
|
||||||
|
{code: utilities.OpPushM},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (l literal) compile() []op {
|
||||||
|
return []op{
|
||||||
|
{
|
||||||
|
code: utilities.OpLitPush,
|
||||||
|
str: string(l),
|
||||||
|
},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (v variable) compile() []op {
|
||||||
|
var ops []op
|
||||||
|
for _, s := range v.segments {
|
||||||
|
ops = append(ops, s.compile()...)
|
||||||
|
}
|
||||||
|
ops = append(ops, op{
|
||||||
|
code: utilities.OpConcatN,
|
||||||
|
num: len(v.segments),
|
||||||
|
}, op{
|
||||||
|
code: utilities.OpCapture,
|
||||||
|
str: v.path,
|
||||||
|
})
|
||||||
|
|
||||||
|
return ops
|
||||||
|
}
|
||||||
|
|
||||||
|
func (t template) Compile() Template {
|
||||||
|
var rawOps []op
|
||||||
|
for _, s := range t.segments {
|
||||||
|
rawOps = append(rawOps, s.compile()...)
|
||||||
|
}
|
||||||
|
|
||||||
|
var (
|
||||||
|
ops []int
|
||||||
|
pool []string
|
||||||
|
fields []string
|
||||||
|
)
|
||||||
|
consts := make(map[string]int)
|
||||||
|
for _, op := range rawOps {
|
||||||
|
ops = append(ops, int(op.code))
|
||||||
|
if op.str == "" {
|
||||||
|
ops = append(ops, op.num)
|
||||||
|
} else {
|
||||||
|
// eof segment literal represents the "/" path pattern
|
||||||
|
if op.str == eof {
|
||||||
|
op.str = ""
|
||||||
|
}
|
||||||
|
if _, ok := consts[op.str]; !ok {
|
||||||
|
consts[op.str] = len(pool)
|
||||||
|
pool = append(pool, op.str)
|
||||||
|
}
|
||||||
|
ops = append(ops, consts[op.str])
|
||||||
|
}
|
||||||
|
if op.code == utilities.OpCapture {
|
||||||
|
fields = append(fields, op.str)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return Template{
|
||||||
|
Version: opcodeVersion,
|
||||||
|
OpCodes: ops,
|
||||||
|
Pool: pool,
|
||||||
|
Verb: t.verb,
|
||||||
|
Fields: fields,
|
||||||
|
Template: t.template,
|
||||||
|
}
|
||||||
|
}
|
11  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/fuzz.go  generated  vendored  Normal file
@@ -0,0 +1,11 @@
// +build gofuzz

package httprule

func Fuzz(data []byte) int {
    _, err := Parse(string(data))
    if err != nil {
        return 0
    }
    return 0
}
368  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go  generated  vendored  Normal file
@@ -0,0 +1,368 @@
package httprule

import (
    "fmt"
    "strings"
)

// InvalidTemplateError indicates that the path template is not valid.
type InvalidTemplateError struct {
    tmpl string
    msg  string
}

func (e InvalidTemplateError) Error() string {
    return fmt.Sprintf("%s: %s", e.msg, e.tmpl)
}

// Parse parses the string representation of path template
func Parse(tmpl string) (Compiler, error) {
    if !strings.HasPrefix(tmpl, "/") {
        return template{}, InvalidTemplateError{tmpl: tmpl, msg: "no leading /"}
    }
    tokens, verb := tokenize(tmpl[1:])

    p := parser{tokens: tokens}
    segs, err := p.topLevelSegments()
    if err != nil {
        return template{}, InvalidTemplateError{tmpl: tmpl, msg: err.Error()}
    }

    return template{
        segments: segs,
        verb:     verb,
        template: tmpl,
    }, nil
}

func tokenize(path string) (tokens []string, verb string) {
    if path == "" {
        return []string{eof}, ""
    }

    const (
        init = iota
        field
        nested
    )
    st := init
    for path != "" {
        var idx int
        switch st {
        case init:
            idx = strings.IndexAny(path, "/{")
        case field:
            idx = strings.IndexAny(path, ".=}")
        case nested:
            idx = strings.IndexAny(path, "/}")
        }
        if idx < 0 {
            tokens = append(tokens, path)
            break
        }
        switch r := path[idx]; r {
        case '/', '.':
        case '{':
            st = field
        case '=':
            st = nested
        case '}':
            st = init
        }
        if idx == 0 {
            tokens = append(tokens, path[idx:idx+1])
        } else {
            tokens = append(tokens, path[:idx], path[idx:idx+1])
        }
        path = path[idx+1:]
    }

    l := len(tokens)
    // See
    // https://github.com/grpc-ecosystem/grpc-gateway/pull/1947#issuecomment-774523693 ;
    // although normal and backwards-compat logic here is to use the last index
    // of a colon, if the final segment is a variable followed by a colon, the
    // part following the colon must be a verb. Hence if the previous token is
    // an end var marker, we switch the index we're looking for to Index instead
    // of LastIndex, so that we correctly grab the remaining part of the path as
    // the verb.
    var penultimateTokenIsEndVar bool
    switch l {
    case 0, 1:
        // Not enough to be variable so skip this logic and don't result in an
        // invalid index
    default:
        penultimateTokenIsEndVar = tokens[l-2] == "}"
    }
    t := tokens[l-1]
    var idx int
    if penultimateTokenIsEndVar {
        idx = strings.Index(t, ":")
    } else {
        idx = strings.LastIndex(t, ":")
    }
    if idx == 0 {
        tokens, verb = tokens[:l-1], t[1:]
    } else if idx > 0 {
        tokens[l-1], verb = t[:idx], t[idx+1:]
    }
    tokens = append(tokens, eof)
    return tokens, verb
}

// parser is a parser of the template syntax defined in github.com/googleapis/googleapis/google/api/http.proto.
type parser struct {
    tokens   []string
    accepted []string
}

// topLevelSegments is the target of this parser.
func (p *parser) topLevelSegments() ([]segment, error) {
    if _, err := p.accept(typeEOF); err == nil {
        p.tokens = p.tokens[:0]
        return []segment{literal(eof)}, nil
    }
    segs, err := p.segments()
    if err != nil {
        return nil, err
    }
    if _, err := p.accept(typeEOF); err != nil {
        return nil, fmt.Errorf("unexpected token %q after segments %q", p.tokens[0], strings.Join(p.accepted, ""))
    }
    return segs, nil
}

func (p *parser) segments() ([]segment, error) {
    s, err := p.segment()
    if err != nil {
        return nil, err
    }

    segs := []segment{s}
    for {
        if _, err := p.accept("/"); err != nil {
            return segs, nil
        }
        s, err := p.segment()
        if err != nil {
            return segs, err
        }
        segs = append(segs, s)
    }
}

func (p *parser) segment() (segment, error) {
    if _, err := p.accept("*"); err == nil {
        return wildcard{}, nil
    }
    if _, err := p.accept("**"); err == nil {
        return deepWildcard{}, nil
    }
    if l, err := p.literal(); err == nil {
        return l, nil
    }

    v, err := p.variable()
    if err != nil {
        return nil, fmt.Errorf("segment neither wildcards, literal or variable: %v", err)
    }
    return v, err
}

func (p *parser) literal() (segment, error) {
    lit, err := p.accept(typeLiteral)
    if err != nil {
        return nil, err
    }
    return literal(lit), nil
}

func (p *parser) variable() (segment, error) {
    if _, err := p.accept("{"); err != nil {
        return nil, err
    }

    path, err := p.fieldPath()
    if err != nil {
        return nil, err
    }

    var segs []segment
    if _, err := p.accept("="); err == nil {
        segs, err = p.segments()
        if err != nil {
            return nil, fmt.Errorf("invalid segment in variable %q: %v", path, err)
        }
    } else {
        segs = []segment{wildcard{}}
    }

    if _, err := p.accept("}"); err != nil {
        return nil, fmt.Errorf("unterminated variable segment: %s", path)
    }
    return variable{
        path:     path,
        segments: segs,
    }, nil
}

func (p *parser) fieldPath() (string, error) {
    c, err := p.accept(typeIdent)
    if err != nil {
        return "", err
    }
    components := []string{c}
    for {
        if _, err = p.accept("."); err != nil {
            return strings.Join(components, "."), nil
        }
        c, err := p.accept(typeIdent)
        if err != nil {
            return "", fmt.Errorf("invalid field path component: %v", err)
        }
        components = append(components, c)
    }
}

// A termType is a type of terminal symbols.
type termType string

// These constants define some of valid values of termType.
// They improve readability of parse functions.
//
// You can also use "/", "*", "**", "." or "=" as valid values.
const (
    typeIdent   = termType("ident")
    typeLiteral = termType("literal")
    typeEOF     = termType("$")
)

const (
    // eof is the terminal symbol which always appears at the end of token sequence.
    eof = "\u0000"
)

// accept tries to accept a token in "p".
// This function consumes a token and returns it if it matches to the specified "term".
// If it doesn't match, the function does not consume any tokens and return an error.
func (p *parser) accept(term termType) (string, error) {
    t := p.tokens[0]
    switch term {
    case "/", "*", "**", ".", "=", "{", "}":
        if t != string(term) && t != "/" {
            return "", fmt.Errorf("expected %q but got %q", term, t)
        }
    case typeEOF:
        if t != eof {
            return "", fmt.Errorf("expected EOF but got %q", t)
        }
    case typeIdent:
        if err := expectIdent(t); err != nil {
            return "", err
        }
    case typeLiteral:
        if err := expectPChars(t); err != nil {
            return "", err
        }
    default:
        return "", fmt.Errorf("unknown termType %q", term)
    }
    p.tokens = p.tokens[1:]
    p.accepted = append(p.accepted, t)
    return t, nil
}

// expectPChars determines if "t" consists of only pchars defined in RFC3986.
//
// https://www.ietf.org/rfc/rfc3986.txt, P.49
//   pchar         = unreserved / pct-encoded / sub-delims / ":" / "@"
//   unreserved    = ALPHA / DIGIT / "-" / "." / "_" / "~"
//   sub-delims    = "!" / "$" / "&" / "'" / "(" / ")"
//                 / "*" / "+" / "," / ";" / "="
//   pct-encoded   = "%" HEXDIG HEXDIG
func expectPChars(t string) error {
    const (
        init = iota
        pct1
        pct2
    )
    st := init
    for _, r := range t {
        if st != init {
            if !isHexDigit(r) {
                return fmt.Errorf("invalid hexdigit: %c(%U)", r, r)
            }
            switch st {
            case pct1:
                st = pct2
            case pct2:
                st = init
            }
            continue
        }

        // unreserved
        switch {
        case 'A' <= r && r <= 'Z':
            continue
        case 'a' <= r && r <= 'z':
            continue
        case '0' <= r && r <= '9':
            continue
        }
        switch r {
        case '-', '.', '_', '~':
            // unreserved
        case '!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=':
            // sub-delims
        case ':', '@':
            // rest of pchar
        case '%':
            // pct-encoded
            st = pct1
        default:
            return fmt.Errorf("invalid character in path segment: %q(%U)", r, r)
        }
    }
    if st != init {
        return fmt.Errorf("invalid percent-encoding in %q", t)
    }
    return nil
}

// expectIdent determines if "ident" is a valid identifier in .proto schema ([[:alpha:]_][[:alphanum:]_]*).
func expectIdent(ident string) error {
    if ident == "" {
        return fmt.Errorf("empty identifier")
    }
    for pos, r := range ident {
        switch {
        case '0' <= r && r <= '9':
            if pos == 0 {
                return fmt.Errorf("identifier starting with digit: %s", ident)
            }
            continue
        case 'A' <= r && r <= 'Z':
            continue
        case 'a' <= r && r <= 'z':
            continue
        case r == '_':
            continue
        default:
            return fmt.Errorf("invalid character %q(%U) in identifier: %s", r, r, ident)
        }
    }
    return nil
}

func isHexDigit(r rune) bool {
    switch {
    case '0' <= r && r <= '9':
        return true
    case 'A' <= r && r <= 'F':
        return true
    case 'a' <= r && r <= 'f':
        return true
    }
    return false
}
60  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/types.go  generated  vendored  Normal file
@@ -0,0 +1,60 @@
package httprule

import (
    "fmt"
    "strings"
)

type template struct {
    segments []segment
    verb     string
    template string
}

type segment interface {
    fmt.Stringer
    compile() (ops []op)
}

type wildcard struct{}

type deepWildcard struct{}

type literal string

type variable struct {
    path     string
    segments []segment
}

func (wildcard) String() string {
    return "*"
}

func (deepWildcard) String() string {
    return "**"
}

func (l literal) String() string {
    return string(l)
}

func (v variable) String() string {
    var segs []string
    for _, s := range v.segments {
        segs = append(segs, s.String())
    }
    return fmt.Sprintf("{%s=%s}", v.path, strings.Join(segs, "/"))
}

func (t template) String() string {
    var segs []string
    for _, s := range t.segments {
        segs = append(segs, s.String())
    }
    str := strings.Join(segs, "/")
    if t.verb != "" {
        str = fmt.Sprintf("%s:%s", str, t.verb)
    }
    return "/" + str
}
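For reference, a minimal sketch of how the three httprule files above fit together: Parse turns a google.api.http path template into a Compiler, and Compile emits the opcode/pool form that runtime.NewPattern consumes. This is illustrative only; the package is internal to grpc-gateway (the code below would only build inside that module), and the template string and field names are invented for the example.

    package main

    import (
        "fmt"

        "github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
    )

    func main() {
        // Parse a path template; the trailing ":publish" is picked up as the verb.
        compiler, err := httprule.Parse("/v1/users/{user_id}/posts/{post_id=articles/*}:publish")
        if err != nil {
            panic(err)
        }
        // Compile produces the opcode representation described in compile.go.
        tmpl := compiler.Compile()
        fmt.Println(tmpl.Fields) // bound field paths, e.g. [user_id post_id]
        fmt.Println(tmpl.Verb)   // "publish"
    }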
91  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel  generated  vendored  Normal file
@@ -0,0 +1,91 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(default_visibility = ["//visibility:public"])

go_library(
    name = "runtime",
    srcs = [
        "context.go",
        "convert.go",
        "doc.go",
        "errors.go",
        "fieldmask.go",
        "handler.go",
        "marshal_httpbodyproto.go",
        "marshal_json.go",
        "marshal_jsonpb.go",
        "marshal_proto.go",
        "marshaler.go",
        "marshaler_registry.go",
        "mux.go",
        "pattern.go",
        "proto2_convert.go",
        "query.go",
    ],
    importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/runtime",
    deps = [
        "//internal/httprule",
        "//utilities",
        "@go_googleapis//google/api:httpbody_go_proto",
        "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
        "@org_golang_google_grpc//codes",
        "@org_golang_google_grpc//grpclog",
        "@org_golang_google_grpc//metadata",
        "@org_golang_google_grpc//status",
        "@org_golang_google_protobuf//encoding/protojson",
        "@org_golang_google_protobuf//proto",
        "@org_golang_google_protobuf//reflect/protoreflect",
        "@org_golang_google_protobuf//reflect/protoregistry",
        "@org_golang_google_protobuf//types/known/durationpb",
        "@org_golang_google_protobuf//types/known/timestamppb",
        "@org_golang_google_protobuf//types/known/wrapperspb",
    ],
)

go_test(
    name = "runtime_test",
    size = "small",
    srcs = [
        "context_test.go",
        "convert_test.go",
        "errors_test.go",
        "fieldmask_test.go",
        "handler_test.go",
        "marshal_httpbodyproto_test.go",
        "marshal_json_test.go",
        "marshal_jsonpb_test.go",
        "marshal_proto_test.go",
        "marshaler_registry_test.go",
        "mux_test.go",
        "pattern_test.go",
        "query_test.go",
    ],
    embed = [":runtime"],
    deps = [
        "//runtime/internal/examplepb",
        "//utilities",
        "@com_github_google_go_cmp//cmp",
        "@com_github_google_go_cmp//cmp/cmpopts",
        "@go_googleapis//google/api:httpbody_go_proto",
        "@go_googleapis//google/rpc:errdetails_go_proto",
        "@go_googleapis//google/rpc:status_go_proto",
        "@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
        "@org_golang_google_grpc//codes",
        "@org_golang_google_grpc//metadata",
        "@org_golang_google_grpc//status",
        "@org_golang_google_protobuf//encoding/protojson",
        "@org_golang_google_protobuf//proto",
        "@org_golang_google_protobuf//testing/protocmp",
        "@org_golang_google_protobuf//types/known/durationpb",
        "@org_golang_google_protobuf//types/known/emptypb",
        "@org_golang_google_protobuf//types/known/structpb",
        "@org_golang_google_protobuf//types/known/timestamppb",
        "@org_golang_google_protobuf//types/known/wrapperspb",
    ],
)

alias(
    name = "go_default_library",
    actual = ":runtime",
    visibility = ["//visibility:public"],
)
345  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go  generated  vendored  Normal file
@@ -0,0 +1,345 @@
package runtime

import (
    "context"
    "encoding/base64"
    "fmt"
    "net"
    "net/http"
    "net/textproto"
    "strconv"
    "strings"
    "sync"
    "time"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/metadata"
    "google.golang.org/grpc/status"
)

// MetadataHeaderPrefix is the http prefix that represents custom metadata
// parameters to or from a gRPC call.
const MetadataHeaderPrefix = "Grpc-Metadata-"

// MetadataPrefix is prepended to permanent HTTP header keys (as specified
// by the IANA) when added to the gRPC context.
const MetadataPrefix = "grpcgateway-"

// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to
// HTTP headers in a response handled by grpc-gateway
const MetadataTrailerPrefix = "Grpc-Trailer-"

const metadataGrpcTimeout = "Grpc-Timeout"
const metadataHeaderBinarySuffix = "-Bin"

const xForwardedFor = "X-Forwarded-For"
const xForwardedHost = "X-Forwarded-Host"

var (
    // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound
    // header isn't present. If the value is 0 the sent `context` will not have a timeout.
    DefaultContextTimeout = 0 * time.Second
)

type (
    rpcMethodKey       struct{}
    httpPathPatternKey struct{}

    AnnotateContextOption func(ctx context.Context) context.Context
)

func WithHTTPPathPattern(pattern string) AnnotateContextOption {
    return func(ctx context.Context) context.Context {
        return withHTTPPathPattern(ctx, pattern)
    }
}

func decodeBinHeader(v string) ([]byte, error) {
    if len(v)%4 == 0 {
        // Input was padded, or padding was not necessary.
        return base64.StdEncoding.DecodeString(v)
    }
    return base64.RawStdEncoding.DecodeString(v)
}

/*
AnnotateContext adds context information such as metadata from the request.

At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For",
except that the forwarded destination is not another HTTP service but rather
a gRPC service.
*/
func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
    ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
    if err != nil {
        return nil, err
    }
    if md == nil {
        return ctx, nil
    }

    return metadata.NewOutgoingContext(ctx, md), nil
}

// AnnotateIncomingContext adds context information such as metadata from the request.
// Attach metadata as incoming context.
func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, error) {
    ctx, md, err := annotateContext(ctx, mux, req, rpcMethodName, options...)
    if err != nil {
        return nil, err
    }
    if md == nil {
        return ctx, nil
    }

    return metadata.NewIncomingContext(ctx, md), nil
}

func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request, rpcMethodName string, options ...AnnotateContextOption) (context.Context, metadata.MD, error) {
    ctx = withRPCMethod(ctx, rpcMethodName)
    for _, o := range options {
        ctx = o(ctx)
    }
    var pairs []string
    timeout := DefaultContextTimeout
    if tm := req.Header.Get(metadataGrpcTimeout); tm != "" {
        var err error
        timeout, err = timeoutDecode(tm)
        if err != nil {
            return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm)
        }
    }

    for key, vals := range req.Header {
        key = textproto.CanonicalMIMEHeaderKey(key)
        for _, val := range vals {
            // For backwards-compatibility, pass through 'authorization' header with no prefix.
            if key == "Authorization" {
                pairs = append(pairs, "authorization", val)
            }
            if h, ok := mux.incomingHeaderMatcher(key); ok {
                // Handles "-bin" metadata in grpc, since grpc will do another base64
                // encode before sending to server, we need to decode it first.
                if strings.HasSuffix(key, metadataHeaderBinarySuffix) {
                    b, err := decodeBinHeader(val)
                    if err != nil {
                        return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err)
                    }

                    val = string(b)
                }
                pairs = append(pairs, h, val)
            }
        }
    }
    if host := req.Header.Get(xForwardedHost); host != "" {
        pairs = append(pairs, strings.ToLower(xForwardedHost), host)
    } else if req.Host != "" {
        pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host)
    }

    if addr := req.RemoteAddr; addr != "" {
        if remoteIP, _, err := net.SplitHostPort(addr); err == nil {
            if fwd := req.Header.Get(xForwardedFor); fwd == "" {
                pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP)
            } else {
                pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP))
            }
        }
    }

    if timeout != 0 {
        //nolint:govet // The context outlives this function
        ctx, _ = context.WithTimeout(ctx, timeout)
    }
    if len(pairs) == 0 {
        return ctx, nil, nil
    }
    md := metadata.Pairs(pairs...)
    for _, mda := range mux.metadataAnnotators {
        md = metadata.Join(md, mda(ctx, req))
    }
    return ctx, md, nil
}

// ServerMetadata consists of metadata sent from gRPC server.
type ServerMetadata struct {
    HeaderMD  metadata.MD
    TrailerMD metadata.MD
}

type serverMetadataKey struct{}

// NewServerMetadataContext creates a new context with ServerMetadata
func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
    return context.WithValue(ctx, serverMetadataKey{}, md)
}

// ServerMetadataFromContext returns the ServerMetadata in ctx
func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
    md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
    return
}

// ServerTransportStream implements grpc.ServerTransportStream.
// It should only be used by the generated files to support grpc.SendHeader
// outside of gRPC server use.
type ServerTransportStream struct {
    mu      sync.Mutex
    header  metadata.MD
    trailer metadata.MD
}

// Method returns the method for the stream.
func (s *ServerTransportStream) Method() string {
    return ""
}

// Header returns the header metadata of the stream.
func (s *ServerTransportStream) Header() metadata.MD {
    s.mu.Lock()
    defer s.mu.Unlock()
    return s.header.Copy()
}

// SetHeader sets the header metadata.
func (s *ServerTransportStream) SetHeader(md metadata.MD) error {
    if md.Len() == 0 {
        return nil
    }

    s.mu.Lock()
    s.header = metadata.Join(s.header, md)
    s.mu.Unlock()
    return nil
}

// SendHeader sets the header metadata.
func (s *ServerTransportStream) SendHeader(md metadata.MD) error {
    return s.SetHeader(md)
}

// Trailer returns the cached trailer metadata.
func (s *ServerTransportStream) Trailer() metadata.MD {
    s.mu.Lock()
    defer s.mu.Unlock()
    return s.trailer.Copy()
}

// SetTrailer sets the trailer metadata.
func (s *ServerTransportStream) SetTrailer(md metadata.MD) error {
    if md.Len() == 0 {
        return nil
    }

    s.mu.Lock()
    s.trailer = metadata.Join(s.trailer, md)
    s.mu.Unlock()
    return nil
}

func timeoutDecode(s string) (time.Duration, error) {
    size := len(s)
    if size < 2 {
        return 0, fmt.Errorf("timeout string is too short: %q", s)
    }
    d, ok := timeoutUnitToDuration(s[size-1])
    if !ok {
        return 0, fmt.Errorf("timeout unit is not recognized: %q", s)
    }
    t, err := strconv.ParseInt(s[:size-1], 10, 64)
    if err != nil {
        return 0, err
    }
    return d * time.Duration(t), nil
}

func timeoutUnitToDuration(u uint8) (d time.Duration, ok bool) {
    switch u {
    case 'H':
        return time.Hour, true
    case 'M':
        return time.Minute, true
    case 'S':
        return time.Second, true
    case 'm':
        return time.Millisecond, true
    case 'u':
        return time.Microsecond, true
    case 'n':
        return time.Nanosecond, true
    default:
    }
    return
}

// isPermanentHTTPHeader checks whether hdr belongs to the list of
// permanent request headers maintained by IANA.
// http://www.iana.org/assignments/message-headers/message-headers.xml
func isPermanentHTTPHeader(hdr string) bool {
    switch hdr {
    case
        "Accept",
        "Accept-Charset",
        "Accept-Language",
        "Accept-Ranges",
        "Authorization",
        "Cache-Control",
        "Content-Type",
        "Cookie",
        "Date",
        "Expect",
        "From",
        "Host",
        "If-Match",
        "If-Modified-Since",
        "If-None-Match",
        "If-Schedule-Tag-Match",
        "If-Unmodified-Since",
        "Max-Forwards",
        "Origin",
        "Pragma",
        "Referer",
        "User-Agent",
        "Via",
        "Warning":
        return true
    }
    return false
}

// RPCMethod returns the method string for the server context. The returned
// string is in the format of "/package.service/method".
func RPCMethod(ctx context.Context) (string, bool) {
    m := ctx.Value(rpcMethodKey{})
    if m == nil {
        return "", false
    }
    ms, ok := m.(string)
    if !ok {
        return "", false
    }
    return ms, true
}

func withRPCMethod(ctx context.Context, rpcMethodName string) context.Context {
    return context.WithValue(ctx, rpcMethodKey{}, rpcMethodName)
}

// HTTPPathPattern returns the HTTP path pattern string relating to the HTTP handler, if one exists.
// The format of the returned string is defined by the google.api.http path template type.
func HTTPPathPattern(ctx context.Context) (string, bool) {
    m := ctx.Value(httpPathPatternKey{})
    if m == nil {
        return "", false
    }
    ms, ok := m.(string)
    if !ok {
        return "", false
    }
    return ms, true
}

func withHTTPPathPattern(ctx context.Context, httpPathPattern string) context.Context {
    return context.WithValue(ctx, httpPathPatternKey{}, httpPathPattern)
}
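A minimal sketch of the context annotation shown above, assuming runtime.NewServeMux (part of the same package but not shown in this diff) and an invented RPC method name; it only illustrates that an inbound Grpc-Timeout header ends up as a context deadline via timeoutDecode.

    package main

    import (
        "context"
        "fmt"
        "net/http/httptest"

        "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    )

    func main() {
        mux := runtime.NewServeMux()
        req := httptest.NewRequest("GET", "http://example.com/v1/things", nil)
        // "250m" means 250 milliseconds in the gRPC timeout wire format ('m' unit).
        req.Header.Set("Grpc-Timeout", "250m")

        ctx, err := runtime.AnnotateContext(context.Background(), mux, req, "/example.Service/ListThings")
        if err != nil {
            panic(err)
        }
        deadline, ok := ctx.Deadline()
        fmt.Println(ok, deadline) // the returned context carries a deadline roughly 250ms from now
    }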
322  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go  generated  vendored  Normal file
@@ -0,0 +1,322 @@
package runtime

import (
    "encoding/base64"
    "fmt"
    "strconv"
    "strings"

    "google.golang.org/protobuf/encoding/protojson"
    "google.golang.org/protobuf/types/known/durationpb"
    "google.golang.org/protobuf/types/known/timestamppb"
    "google.golang.org/protobuf/types/known/wrapperspb"
)

// String just returns the given string.
// It is just for compatibility to other types.
func String(val string) (string, error) {
    return val, nil
}

// StringSlice converts 'val' where individual strings are separated by
// 'sep' into a string slice.
func StringSlice(val, sep string) ([]string, error) {
    return strings.Split(val, sep), nil
}

// Bool converts the given string representation of a boolean value into bool.
func Bool(val string) (bool, error) {
    return strconv.ParseBool(val)
}

// BoolSlice converts 'val' where individual booleans are separated by
// 'sep' into a bool slice.
func BoolSlice(val, sep string) ([]bool, error) {
    s := strings.Split(val, sep)
    values := make([]bool, len(s))
    for i, v := range s {
        value, err := Bool(v)
        if err != nil {
            return values, err
        }
        values[i] = value
    }
    return values, nil
}

// Float64 converts the given string representation into representation of a floating point number into float64.
func Float64(val string) (float64, error) {
    return strconv.ParseFloat(val, 64)
}

// Float64Slice converts 'val' where individual floating point numbers are separated by
// 'sep' into a float64 slice.
func Float64Slice(val, sep string) ([]float64, error) {
    s := strings.Split(val, sep)
    values := make([]float64, len(s))
    for i, v := range s {
        value, err := Float64(v)
        if err != nil {
            return values, err
        }
        values[i] = value
    }
    return values, nil
}

// Float32 converts the given string representation of a floating point number into float32.
func Float32(val string) (float32, error) {
    f, err := strconv.ParseFloat(val, 32)
    if err != nil {
        return 0, err
    }
    return float32(f), nil
}

// Float32Slice converts 'val' where individual floating point numbers are separated by
// 'sep' into a float32 slice.
func Float32Slice(val, sep string) ([]float32, error) {
    s := strings.Split(val, sep)
    values := make([]float32, len(s))
    for i, v := range s {
        value, err := Float32(v)
        if err != nil {
            return values, err
        }
        values[i] = value
    }
    return values, nil
}

// Int64 converts the given string representation of an integer into int64.
func Int64(val string) (int64, error) {
    return strconv.ParseInt(val, 0, 64)
}

// Int64Slice converts 'val' where individual integers are separated by
// 'sep' into a int64 slice.
func Int64Slice(val, sep string) ([]int64, error) {
    s := strings.Split(val, sep)
    values := make([]int64, len(s))
    for i, v := range s {
        value, err := Int64(v)
        if err != nil {
            return values, err
        }
        values[i] = value
    }
    return values, nil
}

// Int32 converts the given string representation of an integer into int32.
func Int32(val string) (int32, error) {
    i, err := strconv.ParseInt(val, 0, 32)
    if err != nil {
        return 0, err
    }
    return int32(i), nil
}

// Int32Slice converts 'val' where individual integers are separated by
// 'sep' into a int32 slice.
func Int32Slice(val, sep string) ([]int32, error) {
    s := strings.Split(val, sep)
    values := make([]int32, len(s))
    for i, v := range s {
        value, err := Int32(v)
        if err != nil {
            return values, err
        }
        values[i] = value
    }
    return values, nil
}

// Uint64 converts the given string representation of an integer into uint64.
func Uint64(val string) (uint64, error) {
    return strconv.ParseUint(val, 0, 64)
}

// Uint64Slice converts 'val' where individual integers are separated by
// 'sep' into a uint64 slice.
func Uint64Slice(val, sep string) ([]uint64, error) {
    s := strings.Split(val, sep)
    values := make([]uint64, len(s))
    for i, v := range s {
        value, err := Uint64(v)
        if err != nil {
            return values, err
        }
        values[i] = value
    }
    return values, nil
}

// Uint32 converts the given string representation of an integer into uint32.
func Uint32(val string) (uint32, error) {
    i, err := strconv.ParseUint(val, 0, 32)
    if err != nil {
        return 0, err
    }
    return uint32(i), nil
}

// Uint32Slice converts 'val' where individual integers are separated by
// 'sep' into a uint32 slice.
func Uint32Slice(val, sep string) ([]uint32, error) {
    s := strings.Split(val, sep)
    values := make([]uint32, len(s))
    for i, v := range s {
        value, err := Uint32(v)
        if err != nil {
            return values, err
        }
        values[i] = value
    }
    return values, nil
}

// Bytes converts the given string representation of a byte sequence into a slice of bytes
// A bytes sequence is encoded in URL-safe base64 without padding
func Bytes(val string) ([]byte, error) {
    b, err := base64.StdEncoding.DecodeString(val)
    if err != nil {
        b, err = base64.URLEncoding.DecodeString(val)
        if err != nil {
            return nil, err
        }
    }
    return b, nil
}

// BytesSlice converts 'val' where individual bytes sequences, encoded in URL-safe
// base64 without padding, are separated by 'sep' into a slice of bytes slices slice.
func BytesSlice(val, sep string) ([][]byte, error) {
    s := strings.Split(val, sep)
    values := make([][]byte, len(s))
    for i, v := range s {
        value, err := Bytes(v)
        if err != nil {
            return values, err
        }
        values[i] = value
    }
    return values, nil
}

// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
func Timestamp(val string) (*timestamppb.Timestamp, error) {
    var r timestamppb.Timestamp
    val = strconv.Quote(strings.Trim(val, `"`))
    unmarshaler := &protojson.UnmarshalOptions{}
    err := unmarshaler.Unmarshal([]byte(val), &r)
    if err != nil {
        return nil, err
    }
    return &r, nil
}

// Duration converts the given string into a timestamp.Duration.
func Duration(val string) (*durationpb.Duration, error) {
    var r durationpb.Duration
    val = strconv.Quote(strings.Trim(val, `"`))
    unmarshaler := &protojson.UnmarshalOptions{}
    err := unmarshaler.Unmarshal([]byte(val), &r)
    if err != nil {
        return nil, err
    }
    return &r, nil
}

// Enum converts the given string into an int32 that should be type casted into the
// correct enum proto type.
func Enum(val string, enumValMap map[string]int32) (int32, error) {
    e, ok := enumValMap[val]
    if ok {
        return e, nil
    }

    i, err := Int32(val)
    if err != nil {
        return 0, fmt.Errorf("%s is not valid", val)
    }
    for _, v := range enumValMap {
        if v == i {
            return i, nil
        }
    }
    return 0, fmt.Errorf("%s is not valid", val)
}

// EnumSlice converts 'val' where individual enums are separated by 'sep'
// into a int32 slice. Each individual int32 should be type casted into the
// correct enum proto type.
func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
    s := strings.Split(val, sep)
    values := make([]int32, len(s))
    for i, v := range s {
        value, err := Enum(v, enumValMap)
        if err != nil {
            return values, err
        }
        values[i] = value
    }
    return values, nil
}

/*
    Support fot google.protobuf.wrappers on top of primitive types
*/

// StringValue well-known type support as wrapper around string type
func StringValue(val string) (*wrapperspb.StringValue, error) {
    return &wrapperspb.StringValue{Value: val}, nil
}

// FloatValue well-known type support as wrapper around float32 type
func FloatValue(val string) (*wrapperspb.FloatValue, error) {
    parsedVal, err := Float32(val)
    return &wrapperspb.FloatValue{Value: parsedVal}, err
}

// DoubleValue well-known type support as wrapper around float64 type
func DoubleValue(val string) (*wrapperspb.DoubleValue, error) {
    parsedVal, err := Float64(val)
    return &wrapperspb.DoubleValue{Value: parsedVal}, err
}

// BoolValue well-known type support as wrapper around bool type
func BoolValue(val string) (*wrapperspb.BoolValue, error) {
    parsedVal, err := Bool(val)
    return &wrapperspb.BoolValue{Value: parsedVal}, err
}

// Int32Value well-known type support as wrapper around int32 type
func Int32Value(val string) (*wrapperspb.Int32Value, error) {
    parsedVal, err := Int32(val)
    return &wrapperspb.Int32Value{Value: parsedVal}, err
}

// UInt32Value well-known type support as wrapper around uint32 type
func UInt32Value(val string) (*wrapperspb.UInt32Value, error) {
    parsedVal, err := Uint32(val)
    return &wrapperspb.UInt32Value{Value: parsedVal}, err
}

// Int64Value well-known type support as wrapper around int64 type
func Int64Value(val string) (*wrapperspb.Int64Value, error) {
    parsedVal, err := Int64(val)
    return &wrapperspb.Int64Value{Value: parsedVal}, err
}

// UInt64Value well-known type support as wrapper around uint64 type
func UInt64Value(val string) (*wrapperspb.UInt64Value, error) {
    parsedVal, err := Uint64(val)
    return &wrapperspb.UInt64Value{Value: parsedVal}, err
}

// BytesValue well-known type support as wrapper around bytes[] type
func BytesValue(val string) (*wrapperspb.BytesValue, error) {
    parsedVal, err := Bytes(val)
    return &wrapperspb.BytesValue{Value: parsedVal}, err
}
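A minimal sketch of the conversion helpers above, which the gateway uses to turn path and query-string values into typed proto fields; the input strings are invented for the example and the printed forms are approximate.

    package main

    import (
        "fmt"

        "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    )

    func main() {
        ids, err := runtime.Int32Slice("1,2,3", ",")
        fmt.Println(ids, err) // [1 2 3] <nil>

        // Duration and Timestamp go through protojson, so they accept the
        // protobuf JSON forms ("1.5s", RFC3339 timestamps).
        d, err := runtime.Duration("1.5s")
        fmt.Println(d, err) // roughly: seconds:1 nanos:500000000 <nil>

        ts, err := runtime.Timestamp("2022-01-01T00:00:00Z")
        fmt.Println(ts, err)
    }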
@@ -0,0 +1,5 @@
/*
Package runtime contains runtime helper functions used by
servers which protoc-gen-grpc-gateway generates.
*/
package runtime
180  vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go  generated  vendored  Normal file
@@ -0,0 +1,180 @@
package runtime

import (
    "context"
    "errors"
    "io"
    "net/http"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/grpclog"
    "google.golang.org/grpc/status"
)

// ErrorHandlerFunc is the signature used to configure error handling.
type ErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)

// StreamErrorHandlerFunc is the signature used to configure stream error handling.
type StreamErrorHandlerFunc func(context.Context, error) *status.Status

// RoutingErrorHandlerFunc is the signature used to configure error handling for routing errors.
type RoutingErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, int)

// HTTPStatusError is the error to use when needing to provide a different HTTP status code for an error
// passed to the DefaultRoutingErrorHandler.
type HTTPStatusError struct {
    HTTPStatus int
    Err        error
}

func (e *HTTPStatusError) Error() string {
    return e.Err.Error()
}

// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
func HTTPStatusFromCode(code codes.Code) int {
    switch code {
    case codes.OK:
        return http.StatusOK
    case codes.Canceled:
        return http.StatusRequestTimeout
    case codes.Unknown:
        return http.StatusInternalServerError
    case codes.InvalidArgument:
        return http.StatusBadRequest
    case codes.DeadlineExceeded:
        return http.StatusGatewayTimeout
    case codes.NotFound:
        return http.StatusNotFound
    case codes.AlreadyExists:
        return http.StatusConflict
    case codes.PermissionDenied:
        return http.StatusForbidden
    case codes.Unauthenticated:
        return http.StatusUnauthorized
    case codes.ResourceExhausted:
        return http.StatusTooManyRequests
    case codes.FailedPrecondition:
        // Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
        return http.StatusBadRequest
    case codes.Aborted:
        return http.StatusConflict
    case codes.OutOfRange:
        return http.StatusBadRequest
    case codes.Unimplemented:
        return http.StatusNotImplemented
    case codes.Internal:
        return http.StatusInternalServerError
    case codes.Unavailable:
        return http.StatusServiceUnavailable
    case codes.DataLoss:
        return http.StatusInternalServerError
    }

    grpclog.Infof("Unknown gRPC error code: %v", code)
    return http.StatusInternalServerError
}

// HTTPError uses the mux-configured error handler.
func HTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
    mux.errorHandler(ctx, mux, marshaler, w, r, err)
}

// DefaultHTTPErrorHandler is the default error handler.
// If "err" is a gRPC Status, the function replies with the status code mapped by HTTPStatusFromCode.
// If "err" is a HTTPStatusError, the function replies with the status code provide by that struct. This is
// intended to allow passing through of specific statuses via the function set via WithRoutingErrorHandler
// for the ServeMux constructor to handle edge cases which the standard mappings in HTTPStatusFromCode
// are insufficient for.
// If otherwise, it replies with http.StatusInternalServerError.
//
// The response body written by this function is a Status message marshaled by the Marshaler.
func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
    // return Internal when Marshal failed
    const fallback = `{"code": 13, "message": "failed to marshal error message"}`

    var customStatus *HTTPStatusError
    if errors.As(err, &customStatus) {
        err = customStatus.Err
    }

    s := status.Convert(err)
    pb := s.Proto()

    w.Header().Del("Trailer")
    w.Header().Del("Transfer-Encoding")

    contentType := marshaler.ContentType(pb)
    w.Header().Set("Content-Type", contentType)

    if s.Code() == codes.Unauthenticated {
        w.Header().Set("WWW-Authenticate", s.Message())
    }

    buf, merr := marshaler.Marshal(pb)
    if merr != nil {
        grpclog.Infof("Failed to marshal error message %q: %v", s, merr)
        w.WriteHeader(http.StatusInternalServerError)
        if _, err := io.WriteString(w, fallback); err != nil {
            grpclog.Infof("Failed to write response: %v", err)
        }
        return
    }

    md, ok := ServerMetadataFromContext(ctx)
    if !ok {
        grpclog.Infof("Failed to extract ServerMetadata from context")
    }

    handleForwardResponseServerMetadata(w, mux, md)

    // RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
    // Unless the request includes a TE header field indicating "trailers"
    // is acceptable, as described in Section 4.3, a server SHOULD NOT
    // generate trailer fields that it believes are necessary for the user
    // agent to receive.
    doForwardTrailers := requestAcceptsTrailers(r)

    if doForwardTrailers {
        handleForwardResponseTrailerHeader(w, md)
        w.Header().Set("Transfer-Encoding", "chunked")
    }

    st := HTTPStatusFromCode(s.Code())
    if customStatus != nil {
        st = customStatus.HTTPStatus
    }

    w.WriteHeader(st)
    if _, err := w.Write(buf); err != nil {
        grpclog.Infof("Failed to write response: %v", err)
    }

    if doForwardTrailers {
        handleForwardResponseTrailer(w, md)
    }
}

func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status {
    return status.Convert(err)
}

// DefaultRoutingErrorHandler is our default handler for routing errors.
// By default http error codes mapped on the following error codes:
//   NotFound -> grpc.NotFound
//   StatusBadRequest -> grpc.InvalidArgument
//   MethodNotAllowed -> grpc.Unimplemented
//   Other -> grpc.Internal, method is not expecting to be called for anything else
func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
    sterr := status.Error(codes.Internal, "Unexpected routing error")
    switch httpStatus {
    case http.StatusBadRequest:
        sterr = status.Error(codes.InvalidArgument, http.StatusText(httpStatus))
    case http.StatusMethodNotAllowed:
        sterr = status.Error(codes.Unimplemented, http.StatusText(httpStatus))
    case http.StatusNotFound:
        sterr = status.Error(codes.NotFound, http.StatusText(httpStatus))
    }
    mux.errorHandler(ctx, mux, marshaler, w, r, sterr)
}
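A minimal sketch of the gRPC-code to HTTP-status mapping implemented by HTTPStatusFromCode above (illustrative only; it uses nothing beyond what this file exports).

    package main

    import (
        "fmt"

        "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
        "google.golang.org/grpc/codes"
    )

    func main() {
        // A few representative mappings from the switch in errors.go.
        fmt.Println(runtime.HTTPStatusFromCode(codes.NotFound))         // 404
        fmt.Println(runtime.HTTPStatusFromCode(codes.PermissionDenied)) // 403
        fmt.Println(runtime.HTTPStatusFromCode(codes.Unavailable))      // 503
    }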

165 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go generated vendored Normal file
@ -0,0 +1,165 @@
package runtime

import (
	"encoding/json"
	"fmt"
	"io"
	"sort"

	"google.golang.org/genproto/protobuf/field_mask"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
)

func getFieldByName(fields protoreflect.FieldDescriptors, name string) protoreflect.FieldDescriptor {
	fd := fields.ByName(protoreflect.Name(name))
	if fd != nil {
		return fd
	}

	return fields.ByJSONName(name)
}

// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body.
func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.FieldMask, error) {
	fm := &field_mask.FieldMask{}
	var root interface{}

	if err := json.NewDecoder(r).Decode(&root); err != nil {
		if err == io.EOF {
			return fm, nil
		}
		return nil, err
	}

	queue := []fieldMaskPathItem{{node: root, msg: msg.ProtoReflect()}}
	for len(queue) > 0 {
		// dequeue an item
		item := queue[0]
		queue = queue[1:]

		m, ok := item.node.(map[string]interface{})
		switch {
		case ok:
			// if the item is an object, then enqueue all of its children
			for k, v := range m {
				if item.msg == nil {
					return nil, fmt.Errorf("JSON structure did not match request type")
				}

				fd := getFieldByName(item.msg.Descriptor().Fields(), k)
				if fd == nil {
					return nil, fmt.Errorf("could not find field %q in %q", k, item.msg.Descriptor().FullName())
				}

				if isDynamicProtoMessage(fd.Message()) {
					for _, p := range buildPathsBlindly(k, v) {
						newPath := p
						if item.path != "" {
							newPath = item.path + "." + newPath
						}
						queue = append(queue, fieldMaskPathItem{path: newPath})
					}
					continue
				}

				if isProtobufAnyMessage(fd.Message()) {
					_, hasTypeField := v.(map[string]interface{})["@type"]
					if hasTypeField {
						queue = append(queue, fieldMaskPathItem{path: k})
						continue
					} else {
						return nil, fmt.Errorf("could not find field @type in %q in message %q", k, item.msg.Descriptor().FullName())
					}

				}

				child := fieldMaskPathItem{
					node: v,
				}
				if item.path == "" {
					child.path = string(fd.FullName().Name())
				} else {
					child.path = item.path + "." + string(fd.FullName().Name())
				}

				switch {
				case fd.IsList(), fd.IsMap():
					// As per: https://github.com/protocolbuffers/protobuf/blob/master/src/google/protobuf/field_mask.proto#L85-L86
					// Do not recurse into repeated fields. The repeated field goes on the end of the path and we stop.
					fm.Paths = append(fm.Paths, child.path)
				case fd.Message() != nil:
					child.msg = item.msg.Get(fd).Message()
					fallthrough
				default:
					queue = append(queue, child)
				}
			}
		case len(item.path) > 0:
			// otherwise, it's a leaf node so print its path
			fm.Paths = append(fm.Paths, item.path)
		}
	}

	// Sort for deterministic output in the presence
	// of repeated fields.
	sort.Strings(fm.Paths)

	return fm, nil
}

func isProtobufAnyMessage(md protoreflect.MessageDescriptor) bool {
	return md != nil && (md.FullName() == "google.protobuf.Any")
}

func isDynamicProtoMessage(md protoreflect.MessageDescriptor) bool {
	return md != nil && (md.FullName() == "google.protobuf.Struct" || md.FullName() == "google.protobuf.Value")
}

// buildPathsBlindly does not attempt to match proto field names to the
// json value keys. Instead it relies completely on the structure of
// the unmarshalled json contained within in.
// Returns a slice containing all subpaths with the root at the
// passed in name and json value.
func buildPathsBlindly(name string, in interface{}) []string {
	m, ok := in.(map[string]interface{})
	if !ok {
		return []string{name}
	}

	var paths []string
	queue := []fieldMaskPathItem{{path: name, node: m}}
	for len(queue) > 0 {
		cur := queue[0]
		queue = queue[1:]

		m, ok := cur.node.(map[string]interface{})
		if !ok {
			// This should never happen since we should always check that we only add
			// nodes of type map[string]interface{} to the queue.
			continue
		}
		for k, v := range m {
			if mi, ok := v.(map[string]interface{}); ok {
				queue = append(queue, fieldMaskPathItem{path: cur.path + "." + k, node: mi})
			} else {
				// This is not a struct, so there are no more levels to descend.
				curPath := cur.path + "." + k
				paths = append(paths, curPath)
			}
		}
	}
	return paths
}

// fieldMaskPathItem stores a in-progress deconstruction of a path for a fieldmask
type fieldMaskPathItem struct {
	// the list of prior fields leading up to node connected by dots
	path string

	// a generic decoded json object the current item to inspect for further path extraction
	node interface{}

	// parent message
	msg protoreflect.Message
}
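
For illustration only, not part of this change: a small sketch of what FieldMaskFromRequestBody produces for a JSON body. A well-known proto type (durationpb.Duration) is used purely to keep the example self-contained; a real gateway passes the generated request message for the RPC being patched.

package main

import (
	"fmt"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/types/known/durationpb"
)

func main() {
	// One path is emitted per JSON key that maps onto a field of the target
	// message (by proto name or JSON name); unknown keys return an error.
	body := strings.NewReader(`{"seconds": 30}`)
	fm, err := runtime.FieldMaskFromRequestBody(body, &durationpb.Duration{})
	if err != nil {
		panic(err)
	}
	fmt.Println(fm.GetPaths()) // [seconds]
}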

223 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go generated vendored Normal file
@ -0,0 +1,223 @@
package runtime

import (
	"context"
	"fmt"
	"io"
	"net/http"
	"net/textproto"
	"strings"

	"google.golang.org/genproto/googleapis/api/httpbody"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/proto"
)

// ForwardResponseStream forwards the stream from gRPC server to REST client.
func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
	f, ok := w.(http.Flusher)
	if !ok {
		grpclog.Infof("Flush not supported in %T", w)
		http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
		return
	}

	md, ok := ServerMetadataFromContext(ctx)
	if !ok {
		grpclog.Infof("Failed to extract ServerMetadata from context")
		http.Error(w, "unexpected error", http.StatusInternalServerError)
		return
	}
	handleForwardResponseServerMetadata(w, mux, md)

	w.Header().Set("Transfer-Encoding", "chunked")
	if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
		HTTPError(ctx, mux, marshaler, w, req, err)
		return
	}

	var delimiter []byte
	if d, ok := marshaler.(Delimited); ok {
		delimiter = d.Delimiter()
	} else {
		delimiter = []byte("\n")
	}

	var wroteHeader bool
	for {
		resp, err := recv()
		if err == io.EOF {
			return
		}
		if err != nil {
			handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
			return
		}
		if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
			handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
			return
		}

		if !wroteHeader {
			w.Header().Set("Content-Type", marshaler.ContentType(resp))
		}

		var buf []byte
		httpBody, isHTTPBody := resp.(*httpbody.HttpBody)
		switch {
		case resp == nil:
			buf, err = marshaler.Marshal(errorChunk(status.New(codes.Internal, "empty response")))
		case isHTTPBody:
			buf = httpBody.GetData()
		default:
			result := map[string]interface{}{"result": resp}
			if rb, ok := resp.(responseBody); ok {
				result["result"] = rb.XXX_ResponseBody()
			}

			buf, err = marshaler.Marshal(result)
		}

		if err != nil {
			grpclog.Infof("Failed to marshal response chunk: %v", err)
			handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
			return
		}
		if _, err = w.Write(buf); err != nil {
			grpclog.Infof("Failed to send response chunk: %v", err)
			return
		}
		wroteHeader = true
		if _, err = w.Write(delimiter); err != nil {
			grpclog.Infof("Failed to send delimiter chunk: %v", err)
			return
		}
		f.Flush()
	}
}

func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
	for k, vs := range md.HeaderMD {
		if h, ok := mux.outgoingHeaderMatcher(k); ok {
			for _, v := range vs {
				w.Header().Add(h, v)
			}
		}
	}
}

func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
	for k := range md.TrailerMD {
		tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
		w.Header().Add("Trailer", tKey)
	}
}

func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
	for k, vs := range md.TrailerMD {
		tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
		for _, v := range vs {
			w.Header().Add(tKey, v)
		}
	}
}

// responseBody interface contains method for getting field for marshaling to the response body
// this method is generated for response struct from the value of `response_body` in the `google.api.HttpRule`
type responseBody interface {
	XXX_ResponseBody() interface{}
}

// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
	md, ok := ServerMetadataFromContext(ctx)
	if !ok {
		grpclog.Infof("Failed to extract ServerMetadata from context")
	}

	handleForwardResponseServerMetadata(w, mux, md)

	// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
	// Unless the request includes a TE header field indicating "trailers"
	// is acceptable, as described in Section 4.3, a server SHOULD NOT
	// generate trailer fields that it believes are necessary for the user
	// agent to receive.
	doForwardTrailers := requestAcceptsTrailers(req)

	if doForwardTrailers {
		handleForwardResponseTrailerHeader(w, md)
		w.Header().Set("Transfer-Encoding", "chunked")
	}

	handleForwardResponseTrailerHeader(w, md)

	contentType := marshaler.ContentType(resp)
	w.Header().Set("Content-Type", contentType)

	if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
		HTTPError(ctx, mux, marshaler, w, req, err)
		return
	}
	var buf []byte
	var err error
	if rb, ok := resp.(responseBody); ok {
		buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
	} else {
		buf, err = marshaler.Marshal(resp)
	}
	if err != nil {
		grpclog.Infof("Marshal error: %v", err)
		HTTPError(ctx, mux, marshaler, w, req, err)
		return
	}

	if _, err = w.Write(buf); err != nil {
		grpclog.Infof("Failed to write response: %v", err)
	}

	if doForwardTrailers {
		handleForwardResponseTrailer(w, md)
	}
}

func requestAcceptsTrailers(req *http.Request) bool {
	te := req.Header.Get("TE")
	return strings.Contains(strings.ToLower(te), "trailers")
}

func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
	if len(opts) == 0 {
		return nil
	}
	for _, opt := range opts {
		if err := opt(ctx, w, resp); err != nil {
			grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
			return err
		}
	}
	return nil
}

func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
	st := mux.streamErrorHandler(ctx, err)
	msg := errorChunk(st)
	if !wroteHeader {
		w.Header().Set("Content-Type", marshaler.ContentType(msg))
		w.WriteHeader(HTTPStatusFromCode(st.Code()))
	}
	buf, merr := marshaler.Marshal(msg)
	if merr != nil {
		grpclog.Infof("Failed to marshal an error: %v", merr)
		return
	}
	if _, werr := w.Write(buf); werr != nil {
		grpclog.Infof("Failed to notify error to client: %v", werr)
		return
	}
}

func errorChunk(st *status.Status) map[string]proto.Message {
	return map[string]proto.Message{"error": st.Proto()}
}
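
For illustration only, not part of this change: requestAcceptsTrailers above gates trailer forwarding on the client's TE header, so a plain Go client has to opt in explicitly. A minimal sketch follows; the URL and port are placeholders for a gateway endpoint.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Without "TE: trailers" the handlers above skip trailer forwarding entirely.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8081/v1/example", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("TE", "trailers")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// HTTP trailers only become visible after the body has been consumed.
	_, _ = io.Copy(io.Discard, resp.Body)
	fmt.Println(resp.Trailer)
}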

32 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_httpbodyproto.go generated vendored Normal file
@ -0,0 +1,32 @@
package runtime

import (
	"google.golang.org/genproto/googleapis/api/httpbody"
)

// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
// google.api.HttpBody message as the full response body if it is
// the actual message used as the response. If not, then this will
// simply fallback to the Marshaler specified as its default Marshaler.
type HTTPBodyMarshaler struct {
	Marshaler
}

// ContentType returns its specified content type in case v is a
// google.api.HttpBody message, otherwise it will fall back to the default Marshalers
// content type.
func (h *HTTPBodyMarshaler) ContentType(v interface{}) string {
	if httpBody, ok := v.(*httpbody.HttpBody); ok {
		return httpBody.GetContentType()
	}
	return h.Marshaler.ContentType(v)
}

// Marshal marshals "v" by returning the body bytes if v is a
// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
	if httpBody, ok := v.(*httpbody.HttpBody); ok {
		return httpBody.Data, nil
	}
	return h.Marshaler.Marshal(v)
}
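
For illustration only, not part of this change: a sketch of the server-side pattern HTTPBodyMarshaler exists for. A gRPC method declared to return google.api.HttpBody lets the gateway write raw bytes with the declared content type; the reportServer type and DownloadReport method are made up for the example.

package main

import (
	"context"
	"fmt"

	"google.golang.org/genproto/googleapis/api/httpbody"
	"google.golang.org/protobuf/types/known/emptypb"
)

// reportServer stands in for a generated gRPC service implementation whose
// RPC is declared to return google.api.HttpBody.
type reportServer struct{}

func (s *reportServer) DownloadReport(ctx context.Context, _ *emptypb.Empty) (*httpbody.HttpBody, error) {
	// With HTTPBodyMarshaler in front of the default marshaler, the gateway
	// writes Data verbatim and uses ContentType for the response header.
	return &httpbody.HttpBody{
		ContentType: "text/csv",
		Data:        []byte("id,total\n1,42\n"),
	}, nil
}

func main() {
	body, _ := (&reportServer{}).DownloadReport(context.Background(), &emptypb.Empty{})
	fmt.Println(body.GetContentType(), len(body.GetData()))
}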

45 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_json.go generated vendored Normal file
@ -0,0 +1,45 @@
package runtime

import (
	"encoding/json"
	"io"
)

// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
// with the standard "encoding/json" package of Golang.
// Although it is generally faster for simple proto messages than JSONPb,
// it does not support advanced features of protobuf, e.g. map, oneof, ....
//
// The NewEncoder and NewDecoder types return *json.Encoder and
// *json.Decoder respectively.
type JSONBuiltin struct{}

// ContentType always Returns "application/json".
func (*JSONBuiltin) ContentType(_ interface{}) string {
	return "application/json"
}

// Marshal marshals "v" into JSON
func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
	return json.Marshal(v)
}

// Unmarshal unmarshals JSON data into "v".
func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
	return json.Unmarshal(data, v)
}

// NewDecoder returns a Decoder which reads JSON stream from "r".
func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
	return json.NewDecoder(r)
}

// NewEncoder returns an Encoder which writes JSON stream into "w".
func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
	return json.NewEncoder(w)
}

// Delimiter for newline encoded JSON streams.
func (j *JSONBuiltin) Delimiter() []byte {
	return []byte("\n")
}

344 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go generated vendored Normal file
@ -0,0 +1,344 @@
package runtime

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
	"reflect"
	"strconv"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/proto"
)

// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
// with the "google.golang.org/protobuf/encoding/protojson" marshaler.
// It supports the full functionality of protobuf unlike JSONBuiltin.
//
// The NewDecoder method returns a DecoderWrapper, so the underlying
// *json.Decoder methods can be used.
type JSONPb struct {
	protojson.MarshalOptions
	protojson.UnmarshalOptions
}

// ContentType always returns "application/json".
func (*JSONPb) ContentType(_ interface{}) string {
	return "application/json"
}

// Marshal marshals "v" into JSON.
func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
	if _, ok := v.(proto.Message); !ok {
		return j.marshalNonProtoField(v)
	}

	var buf bytes.Buffer
	if err := j.marshalTo(&buf, v); err != nil {
		return nil, err
	}
	return buf.Bytes(), nil
}

func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
	p, ok := v.(proto.Message)
	if !ok {
		buf, err := j.marshalNonProtoField(v)
		if err != nil {
			return err
		}
		_, err = w.Write(buf)
		return err
	}
	b, err := j.MarshalOptions.Marshal(p)
	if err != nil {
		return err
	}

	_, err = w.Write(b)
	return err
}

var (
	// protoMessageType is stored to prevent constant lookup of the same type at runtime.
	protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
)

// marshalNonProto marshals a non-message field of a protobuf message.
// This function does not correctly marshal arbitrary data structures into JSON,
// it is only capable of marshaling non-message field values of protobuf,
// i.e. primitive types, enums; pointers to primitives or enums; maps from
// integer/string types to primitives/enums/pointers to messages.
func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
	if v == nil {
		return []byte("null"), nil
	}
	rv := reflect.ValueOf(v)
	for rv.Kind() == reflect.Ptr {
		if rv.IsNil() {
			return []byte("null"), nil
		}
		rv = rv.Elem()
	}

	if rv.Kind() == reflect.Slice {
		if rv.IsNil() {
			if j.EmitUnpopulated {
				return []byte("[]"), nil
			}
			return []byte("null"), nil
		}

		if rv.Type().Elem().Implements(protoMessageType) {
			var buf bytes.Buffer
			err := buf.WriteByte('[')
			if err != nil {
				return nil, err
			}
			for i := 0; i < rv.Len(); i++ {
				if i != 0 {
					err = buf.WriteByte(',')
					if err != nil {
						return nil, err
					}
				}
				if err = j.marshalTo(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
					return nil, err
				}
			}
			err = buf.WriteByte(']')
			if err != nil {
				return nil, err
			}

			return buf.Bytes(), nil
		}

		if rv.Type().Elem().Implements(typeProtoEnum) {
			var buf bytes.Buffer
			err := buf.WriteByte('[')
			if err != nil {
				return nil, err
			}
			for i := 0; i < rv.Len(); i++ {
				if i != 0 {
					err = buf.WriteByte(',')
					if err != nil {
						return nil, err
					}
				}
				if j.UseEnumNumbers {
					_, err = buf.WriteString(strconv.FormatInt(rv.Index(i).Int(), 10))
				} else {
					_, err = buf.WriteString("\"" + rv.Index(i).Interface().(protoEnum).String() + "\"")
				}
				if err != nil {
					return nil, err
				}
			}
			err = buf.WriteByte(']')
			if err != nil {
				return nil, err
			}

			return buf.Bytes(), nil
		}
	}

	if rv.Kind() == reflect.Map {
		m := make(map[string]*json.RawMessage)
		for _, k := range rv.MapKeys() {
			buf, err := j.Marshal(rv.MapIndex(k).Interface())
			if err != nil {
				return nil, err
			}
			m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
		}
		if j.Indent != "" {
			return json.MarshalIndent(m, "", j.Indent)
		}
		return json.Marshal(m)
	}
	if enum, ok := rv.Interface().(protoEnum); ok && !j.UseEnumNumbers {
		return json.Marshal(enum.String())
	}
	return json.Marshal(rv.Interface())
}

// Unmarshal unmarshals JSON "data" into "v"
func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
	return unmarshalJSONPb(data, j.UnmarshalOptions, v)
}

// NewDecoder returns a Decoder which reads JSON stream from "r".
func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
	d := json.NewDecoder(r)
	return DecoderWrapper{
		Decoder:          d,
		UnmarshalOptions: j.UnmarshalOptions,
	}
}

// DecoderWrapper is a wrapper around a *json.Decoder that adds
// support for protos to the Decode method.
type DecoderWrapper struct {
	*json.Decoder
	protojson.UnmarshalOptions
}

// Decode wraps the embedded decoder's Decode method to support
// protos using a jsonpb.Unmarshaler.
func (d DecoderWrapper) Decode(v interface{}) error {
	return decodeJSONPb(d.Decoder, d.UnmarshalOptions, v)
}

// NewEncoder returns an Encoder which writes JSON stream into "w".
func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
	return EncoderFunc(func(v interface{}) error {
		if err := j.marshalTo(w, v); err != nil {
			return err
		}
		// mimic json.Encoder by adding a newline (makes output
		// easier to read when it contains multiple encoded items)
		_, err := w.Write(j.Delimiter())
		return err
	})
}

func unmarshalJSONPb(data []byte, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
	d := json.NewDecoder(bytes.NewReader(data))
	return decodeJSONPb(d, unmarshaler, v)
}

func decodeJSONPb(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
	p, ok := v.(proto.Message)
	if !ok {
		return decodeNonProtoField(d, unmarshaler, v)
	}

	// Decode into bytes for marshalling
	var b json.RawMessage
	err := d.Decode(&b)
	if err != nil {
		return err
	}

	return unmarshaler.Unmarshal([]byte(b), p)
}

func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions, v interface{}) error {
	rv := reflect.ValueOf(v)
	if rv.Kind() != reflect.Ptr {
		return fmt.Errorf("%T is not a pointer", v)
	}
	for rv.Kind() == reflect.Ptr {
		if rv.IsNil() {
			rv.Set(reflect.New(rv.Type().Elem()))
		}
		if rv.Type().ConvertibleTo(typeProtoMessage) {
			// Decode into bytes for marshalling
			var b json.RawMessage
			err := d.Decode(&b)
			if err != nil {
				return err
			}

			return unmarshaler.Unmarshal([]byte(b), rv.Interface().(proto.Message))
		}
		rv = rv.Elem()
	}
	if rv.Kind() == reflect.Map {
		if rv.IsNil() {
			rv.Set(reflect.MakeMap(rv.Type()))
		}
		conv, ok := convFromType[rv.Type().Key().Kind()]
		if !ok {
			return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
		}

		m := make(map[string]*json.RawMessage)
		if err := d.Decode(&m); err != nil {
			return err
		}
		for k, v := range m {
			result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
			if err := result[1].Interface(); err != nil {
				return err.(error)
			}
			bk := result[0]
			bv := reflect.New(rv.Type().Elem())
			if v == nil {
				null := json.RawMessage("null")
				v = &null
			}
			if err := unmarshalJSONPb([]byte(*v), unmarshaler, bv.Interface()); err != nil {
				return err
			}
			rv.SetMapIndex(bk, bv.Elem())
		}
		return nil
	}
	if rv.Kind() == reflect.Slice {
		var sl []json.RawMessage
		if err := d.Decode(&sl); err != nil {
			return err
		}
		if sl != nil {
			rv.Set(reflect.MakeSlice(rv.Type(), 0, 0))
		}
		for _, item := range sl {
			bv := reflect.New(rv.Type().Elem())
			if err := unmarshalJSONPb([]byte(item), unmarshaler, bv.Interface()); err != nil {
				return err
			}
			rv.Set(reflect.Append(rv, bv.Elem()))
		}
		return nil
	}
	if _, ok := rv.Interface().(protoEnum); ok {
		var repr interface{}
		if err := d.Decode(&repr); err != nil {
			return err
		}
		switch v := repr.(type) {
		case string:
			// TODO(yugui) Should use proto.StructProperties?
			return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
		case float64:
			rv.Set(reflect.ValueOf(int32(v)).Convert(rv.Type()))
			return nil
		default:
			return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
		}
	}
	return d.Decode(v)
}

type protoEnum interface {
	fmt.Stringer
	EnumDescriptor() ([]byte, []int)
}

var typeProtoEnum = reflect.TypeOf((*protoEnum)(nil)).Elem()

var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()

// Delimiter for newline encoded JSON streams.
func (j *JSONPb) Delimiter() []byte {
	return []byte("\n")
}

var (
	convFromType = map[reflect.Kind]reflect.Value{
		reflect.String:  reflect.ValueOf(String),
		reflect.Bool:    reflect.ValueOf(Bool),
		reflect.Float64: reflect.ValueOf(Float64),
		reflect.Float32: reflect.ValueOf(Float32),
		reflect.Int64:   reflect.ValueOf(Int64),
		reflect.Int32:   reflect.ValueOf(Int32),
		reflect.Uint64:  reflect.ValueOf(Uint64),
		reflect.Uint32:  reflect.ValueOf(Uint32),
		reflect.Slice:   reflect.ValueOf(Bytes),
	}
)
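
For illustration only, not part of this change: the usual way JSONPb gets configured is through the marshaler option defined later in this change. A sketch that swaps the default settings (field-name style and unpopulated-field emission are the knobs most callers touch); the port is a placeholder.

package main

import (
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/encoding/protojson"
)

func main() {
	// Register a JSONPb marshaler for every content type ("*"): keep proto
	// field names as declared and emit zero-valued fields in responses.
	mux := runtime.NewServeMux(
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{
			MarshalOptions: protojson.MarshalOptions{
				UseProtoNames:   true,
				EmitUnpopulated: true,
			},
			UnmarshalOptions: protojson.UnmarshalOptions{
				DiscardUnknown: true,
			},
		}),
	)
	_ = http.ListenAndServe("localhost:8081", mux)
}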

63 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_proto.go generated vendored Normal file
@ -0,0 +1,63 @@
package runtime

import (
	"io"

	"errors"
	"io/ioutil"

	"google.golang.org/protobuf/proto"
)

// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialize proto bytes
type ProtoMarshaller struct{}

// ContentType always returns "application/octet-stream".
func (*ProtoMarshaller) ContentType(_ interface{}) string {
	return "application/octet-stream"
}

// Marshal marshals "value" into Proto
func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
	message, ok := value.(proto.Message)
	if !ok {
		return nil, errors.New("unable to marshal non proto field")
	}
	return proto.Marshal(message)
}

// Unmarshal unmarshals proto "data" into "value"
func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
	message, ok := value.(proto.Message)
	if !ok {
		return errors.New("unable to unmarshal non proto field")
	}
	return proto.Unmarshal(data, message)
}

// NewDecoder returns a Decoder which reads proto stream from "reader".
func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
	return DecoderFunc(func(value interface{}) error {
		buffer, err := ioutil.ReadAll(reader)
		if err != nil {
			return err
		}
		return marshaller.Unmarshal(buffer, value)
	})
}

// NewEncoder returns an Encoder which writes proto stream into "writer".
func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
	return EncoderFunc(func(value interface{}) error {
		buffer, err := marshaller.Marshal(value)
		if err != nil {
			return err
		}
		_, err = writer.Write(buffer)
		if err != nil {
			return err
		}

		return nil
	})
}

50 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler.go generated vendored Normal file
@ -0,0 +1,50 @@
package runtime

import (
	"io"
)

// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
type Marshaler interface {
	// Marshal marshals "v" into byte sequence.
	Marshal(v interface{}) ([]byte, error)
	// Unmarshal unmarshals "data" into "v".
	// "v" must be a pointer value.
	Unmarshal(data []byte, v interface{}) error
	// NewDecoder returns a Decoder which reads byte sequence from "r".
	NewDecoder(r io.Reader) Decoder
	// NewEncoder returns an Encoder which writes bytes sequence into "w".
	NewEncoder(w io.Writer) Encoder
	// ContentType returns the Content-Type which this marshaler is responsible for.
	// The parameter describes the type which is being marshalled, which can sometimes
	// affect the content type returned.
	ContentType(v interface{}) string
}

// Decoder decodes a byte sequence
type Decoder interface {
	Decode(v interface{}) error
}

// Encoder encodes gRPC payloads / fields into byte sequence.
type Encoder interface {
	Encode(v interface{}) error
}

// DecoderFunc adapts an decoder function into Decoder.
type DecoderFunc func(v interface{}) error

// Decode delegates invocations to the underlying function itself.
func (f DecoderFunc) Decode(v interface{}) error { return f(v) }

// EncoderFunc adapts an encoder function into Encoder
type EncoderFunc func(v interface{}) error

// Encode delegates invocations to the underlying function itself.
func (f EncoderFunc) Encode(v interface{}) error { return f(v) }

// Delimited defines the streaming delimiter.
type Delimited interface {
	// Delimiter returns the record separator for the stream.
	Delimiter() []byte
}

109 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshaler_registry.go generated vendored Normal file
@ -0,0 +1,109 @@
package runtime

import (
	"errors"
	"mime"
	"net/http"

	"google.golang.org/grpc/grpclog"
	"google.golang.org/protobuf/encoding/protojson"
)

// MIMEWildcard is the fallback MIME type used for requests which do not match
// a registered MIME type.
const MIMEWildcard = "*"

var (
	acceptHeader      = http.CanonicalHeaderKey("Accept")
	contentTypeHeader = http.CanonicalHeaderKey("Content-Type")

	defaultMarshaler = &HTTPBodyMarshaler{
		Marshaler: &JSONPb{
			MarshalOptions: protojson.MarshalOptions{
				EmitUnpopulated: true,
			},
			UnmarshalOptions: protojson.UnmarshalOptions{
				DiscardUnknown: true,
			},
		},
	}
)

// MarshalerForRequest returns the inbound/outbound marshalers for this request.
// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
// If it isn't set (or the request Content-Type is empty), checks for "*".
// If there are multiple Content-Type headers set, choose the first one that it can
// exactly match in the registry.
// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
	for _, acceptVal := range r.Header[acceptHeader] {
		if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
			outbound = m
			break
		}
	}

	for _, contentTypeVal := range r.Header[contentTypeHeader] {
		contentType, _, err := mime.ParseMediaType(contentTypeVal)
		if err != nil {
			grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err)
			continue
		}
		if m, ok := mux.marshalers.mimeMap[contentType]; ok {
			inbound = m
			break
		}
	}

	if inbound == nil {
		inbound = mux.marshalers.mimeMap[MIMEWildcard]
	}
	if outbound == nil {
		outbound = inbound
	}

	return inbound, outbound
}

// marshalerRegistry is a mapping from MIME types to Marshalers.
type marshalerRegistry struct {
	mimeMap map[string]Marshaler
}

// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
// MIME type).
func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
	if len(mime) == 0 {
		return errors.New("empty MIME type")
	}

	m.mimeMap[mime] = marshaler

	return nil
}

// makeMarshalerMIMERegistry returns a new registry of marshalers.
// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
//
// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
// with a "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
// with a "application/json" Content-Type.
// "*" can be used to match any Content-Type.
// This can be attached to a ServerMux with the marshaler option.
func makeMarshalerMIMERegistry() marshalerRegistry {
	return marshalerRegistry{
		mimeMap: map[string]Marshaler{
			MIMEWildcard: defaultMarshaler,
		},
	}
}

// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
// Marshalers to a MIME type in mux.
func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
	return func(mux *ServeMux) {
		if err := mux.marshalers.add(mime, marshaler); err != nil {
			panic(err)
		}
	}
}
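
For illustration only, not part of this change: a sketch of the content negotiation MarshalerForRequest implements, registering the ProtoMarshaller from earlier in this change next to the JSON default. The inbound marshaler follows Content-Type and the outbound one follows Accept; the port is a placeholder.

package main

import (
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	// "*" keeps the JSON default; clients that send
	// "Content-Type: application/octet-stream" or "Accept: application/octet-stream"
	// are decoded/encoded with raw protobuf instead.
	mux := runtime.NewServeMux(
		runtime.WithMarshalerOption("application/octet-stream", &runtime.ProtoMarshaller{}),
	)
	_ = http.ListenAndServe("localhost:8081", mux)
}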

@ -0,0 +1,356 @@
package runtime

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"net/textproto"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
	"google.golang.org/grpc/status"
	"google.golang.org/protobuf/proto"
)

// UnescapingMode defines the behavior of ServeMux when unescaping path parameters.
type UnescapingMode int

const (
	// UnescapingModeLegacy is the default V2 behavior, which escapes the entire
	// path string before doing any routing.
	UnescapingModeLegacy UnescapingMode = iota

	// EscapingTypeExceptReserved unescapes all path parameters except RFC 6570
	// reserved characters.
	UnescapingModeAllExceptReserved

	// EscapingTypeExceptSlash unescapes URL path parameters except path
	// seperators, which will be left as "%2F".
	UnescapingModeAllExceptSlash

	// URL path parameters will be fully decoded.
	UnescapingModeAllCharacters

	// UnescapingModeDefault is the default escaping type.
	// TODO(v3): default this to UnescapingModeAllExceptReserved per grpc-httpjson-transcoding's
	// reference implementation
	UnescapingModeDefault = UnescapingModeLegacy
)

// A HandlerFunc handles a specific pair of path pattern and HTTP method.
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)

// ServeMux is a request multiplexer for grpc-gateway.
// It matches http requests to patterns and invokes the corresponding handler.
type ServeMux struct {
	// handlers maps HTTP method to a list of handlers.
	handlers                  map[string][]handler
	forwardResponseOptions    []func(context.Context, http.ResponseWriter, proto.Message) error
	marshalers                marshalerRegistry
	incomingHeaderMatcher     HeaderMatcherFunc
	outgoingHeaderMatcher     HeaderMatcherFunc
	metadataAnnotators        []func(context.Context, *http.Request) metadata.MD
	errorHandler              ErrorHandlerFunc
	streamErrorHandler        StreamErrorHandlerFunc
	routingErrorHandler       RoutingErrorHandlerFunc
	disablePathLengthFallback bool
	unescapingMode            UnescapingMode
}

// ServeMuxOption is an option that can be given to a ServeMux on construction.
type ServeMuxOption func(*ServeMux)

// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
//
// forwardResponseOption is an option that will be called on the relevant context.Context,
// http.ResponseWriter, and proto.Message before every forwarded response.
//
// The message may be nil in the case where just a header is being sent.
func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
	return func(serveMux *ServeMux) {
		serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
	}
}

// WithEscapingType sets the escaping type. See the definitions of UnescapingMode
// for more information.
func WithUnescapingMode(mode UnescapingMode) ServeMuxOption {
	return func(serveMux *ServeMux) {
		serveMux.unescapingMode = mode
	}
}

// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters.
// Configuring this will mean the generated OpenAPI output is no longer correct, and it should be
// done with careful consideration.
func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
	return func(serveMux *ServeMux) {
		currentQueryParser = queryParameterParser
	}
}

// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
type HeaderMatcherFunc func(string) (string, bool)

// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
func DefaultHeaderMatcher(key string) (string, bool) {
	key = textproto.CanonicalMIMEHeaderKey(key)
	if isPermanentHTTPHeader(key) {
		return MetadataPrefix + key, true
	} else if strings.HasPrefix(key, MetadataHeaderPrefix) {
		return key[len(MetadataHeaderPrefix):], true
	}
	return "", false
}

// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
//
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
	return func(mux *ServeMux) {
		mux.incomingHeaderMatcher = fn
	}
}

// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
//
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
// passed to http response returned from gateway. To transform the header before passing to response,
// matcher should return modified header.
func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
	return func(mux *ServeMux) {
		mux.outgoingHeaderMatcher = fn
	}
}

// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
//
// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
// is reading token from cookie and adding it in gRPC context.
func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
	return func(serveMux *ServeMux) {
		serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
	}
}

// WithErrorHandler returns a ServeMuxOption for configuring a custom error handler.
//
// This can be used to configure a custom error response.
func WithErrorHandler(fn ErrorHandlerFunc) ServeMuxOption {
	return func(serveMux *ServeMux) {
		serveMux.errorHandler = fn
	}
}

// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
// error handler, which allows for customizing the error trailer for server-streaming
// calls.
//
// For stream errors that occur before any response has been written, the mux's
// ErrorHandler will be invoked. However, once data has been written, the errors must
// be handled differently: they must be included in the response body. The response body's
// final message will include the error details returned by the stream error handler.
func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
	return func(serveMux *ServeMux) {
		serveMux.streamErrorHandler = fn
	}
}

// WithRoutingErrorHandler returns a ServeMuxOption for configuring a custom error handler to handle http routing errors.
//
// Method called for errors which can happen before gRPC route selected or executed.
// The following error codes: StatusMethodNotAllowed StatusNotFound StatusBadRequest
func WithRoutingErrorHandler(fn RoutingErrorHandlerFunc) ServeMuxOption {
	return func(serveMux *ServeMux) {
		serveMux.routingErrorHandler = fn
	}
}

// WithDisablePathLengthFallback returns a ServeMuxOption for disable path length fallback.
func WithDisablePathLengthFallback() ServeMuxOption {
	return func(serveMux *ServeMux) {
		serveMux.disablePathLengthFallback = true
	}
}

// NewServeMux returns a new ServeMux whose internal mapping is empty.
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
	serveMux := &ServeMux{
		handlers:               make(map[string][]handler),
		forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
		marshalers:             makeMarshalerMIMERegistry(),
		errorHandler:           DefaultHTTPErrorHandler,
		streamErrorHandler:     DefaultStreamErrorHandler,
		routingErrorHandler:    DefaultRoutingErrorHandler,
		unescapingMode:         UnescapingModeDefault,
	}

	for _, opt := range opts {
		opt(serveMux)
	}

	if serveMux.incomingHeaderMatcher == nil {
		serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
	}

	if serveMux.outgoingHeaderMatcher == nil {
		serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
			return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
		}
	}

	return serveMux
}

// Handle associates "h" to the pair of HTTP method and path pattern.
func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
	s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...)
}

// HandlePath allows users to configure custom path handlers.
// refer: https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/inject_router/
func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) error {
	compiler, err := httprule.Parse(pathPattern)
	if err != nil {
		return fmt.Errorf("parsing path pattern: %w", err)
	}
	tp := compiler.Compile()
	pattern, err := NewPattern(tp.Version, tp.OpCodes, tp.Pool, tp.Verb)
	if err != nil {
		return fmt.Errorf("creating new pattern: %w", err)
	}
	s.Handle(meth, pattern, h)
	return nil
}

// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	ctx := r.Context()

	path := r.URL.Path
	if !strings.HasPrefix(path, "/") {
		_, outboundMarshaler := MarshalerForRequest(s, r)
		s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusBadRequest)
		return
	}

	// TODO(v3): remove UnescapingModeLegacy
	if s.unescapingMode != UnescapingModeLegacy && r.URL.RawPath != "" {
		path = r.URL.RawPath
	}

	components := strings.Split(path[1:], "/")

	if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
		r.Method = strings.ToUpper(override)
		if err := r.ParseForm(); err != nil {
			_, outboundMarshaler := MarshalerForRequest(s, r)
			sterr := status.Error(codes.InvalidArgument, err.Error())
			s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
			return
		}
	}

	// Verb out here is to memoize for the fallback case below
	var verb string

	for _, h := range s.handlers[r.Method] {
		// If the pattern has a verb, explicitly look for a suffix in the last
		// component that matches a colon plus the verb. This allows us to
		// handle some cases that otherwise can't be correctly handled by the
		// former LastIndex case, such as when the verb literal itself contains
		// a colon. This should work for all cases that have run through the
		// parser because we know what verb we're looking for, however, there
		// are still some cases that the parser itself cannot disambiguate. See
		// the comment there if interested.
		patVerb := h.pat.Verb()
		l := len(components)
		lastComponent := components[l-1]
		var idx int = -1
		if patVerb != "" && strings.HasSuffix(lastComponent, ":"+patVerb) {
			idx = len(lastComponent) - len(patVerb) - 1
		}
		if idx == 0 {
			_, outboundMarshaler := MarshalerForRequest(s, r)
			s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
			return
		}
		if idx > 0 {
			components[l-1], verb = lastComponent[:idx], lastComponent[idx+1:]
		}

		pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
		if err != nil {
			var mse MalformedSequenceError
			if ok := errors.As(err, &mse); ok {
				_, outboundMarshaler := MarshalerForRequest(s, r)
				s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
					HTTPStatus: http.StatusBadRequest,
					Err:        mse,
				})
			}
			continue
		}
		h.h(w, r, pathParams)
		return
	}

	// lookup other methods to handle fallback from GET to POST and
	// to determine if it is NotImplemented or NotFound.
	for m, handlers := range s.handlers {
		if m == r.Method {
			continue
		}
		for _, h := range handlers {
			pathParams, err := h.pat.MatchAndEscape(components, verb, s.unescapingMode)
			if err != nil {
				var mse MalformedSequenceError
				if ok := errors.As(err, &mse); ok {
					_, outboundMarshaler := MarshalerForRequest(s, r)
					s.errorHandler(ctx, s, outboundMarshaler, w, r, &HTTPStatusError{
						HTTPStatus: http.StatusBadRequest,
						Err:        mse,
					})
				}
				continue
			}
			// X-HTTP-Method-Override is optional. Always allow fallback to POST.
			if s.isPathLengthFallback(r) {
				if err := r.ParseForm(); err != nil {
					_, outboundMarshaler := MarshalerForRequest(s, r)
					sterr := status.Error(codes.InvalidArgument, err.Error())
					s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
					return
				}
				h.h(w, r, pathParams)
				return
			}
			_, outboundMarshaler := MarshalerForRequest(s, r)
			s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusMethodNotAllowed)
			return
		}
	}

	_, outboundMarshaler := MarshalerForRequest(s, r)
	s.routingErrorHandler(ctx, s, outboundMarshaler, w, r, http.StatusNotFound)
}

// GetForwardResponseOptions returns the ForwardResponseOptions associated with this ServeMux.
func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error {
	return s.forwardResponseOptions
}

func (s *ServeMux) isPathLengthFallback(r *http.Request) bool {
	return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded"
}

type handler struct {
	pat Pattern
	h   HandlerFunc
}
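
For illustration only, not part of this change: a sketch of the ServeMux surface added above, wiring a plain HTTP route with HandlePath and narrowing which inbound headers reach gRPC metadata. The header name, path, and port are examples.

package main

import (
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	mux := runtime.NewServeMux(
		// Forward one custom header into gRPC metadata; everything else goes
		// through DefaultHeaderMatcher (permanent headers and Grpc-Metadata-*).
		runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) {
			if key == "X-Request-Id" {
				return key, true
			}
			return runtime.DefaultHeaderMatcher(key)
		}),
	)

	// Plain HTTP handler served by the same mux, e.g. for health checks.
	err := mux.HandlePath(http.MethodGet, "/healthz", func(w http.ResponseWriter, r *http.Request, pathParams map[string]string) {
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("ok"))
	})
	if err != nil {
		panic(err)
	}

	_ = http.ListenAndServe("localhost:8081", mux)
}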

383 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/pattern.go generated vendored Normal file
@ -0,0 +1,383 @@
package runtime

import (
	"errors"
	"fmt"
	"strconv"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
	"google.golang.org/grpc/grpclog"
)

var (
	// ErrNotMatch indicates that the given HTTP request path does not match to the pattern.
	ErrNotMatch = errors.New("not match to the path pattern")
	// ErrInvalidPattern indicates that the given definition of Pattern is not valid.
	ErrInvalidPattern = errors.New("invalid pattern")
	// ErrMalformedSequence indicates that an escape sequence was malformed.
	ErrMalformedSequence = errors.New("malformed escape sequence")
)

type MalformedSequenceError string

func (e MalformedSequenceError) Error() string {
	return "malformed path escape " + strconv.Quote(string(e))
}

type op struct {
	code    utilities.OpCode
	operand int
}

// Pattern is a template pattern of http request paths defined in
// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto
type Pattern struct {
	// ops is a list of operations
	ops []op
	// pool is a constant pool indexed by the operands or vars.
	pool []string
	// vars is a list of variables names to be bound by this pattern
	vars []string
	// stacksize is the max depth of the stack
	stacksize int
	// tailLen is the length of the fixed-size segments after a deep wildcard
	tailLen int
	// verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part.
	verb string
}

// NewPattern returns a new Pattern from the given definition values.
// "ops" is a sequence of op codes. "pool" is a constant pool.
// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part.
// "version" must be 1 for now.
// It returns an error if the given definition is invalid.
func NewPattern(version int, ops []int, pool []string, verb string) (Pattern, error) {
	if version != 1 {
		grpclog.Infof("unsupported version: %d", version)
		return Pattern{}, ErrInvalidPattern
	}

	l := len(ops)
	if l%2 != 0 {
		grpclog.Infof("odd number of ops codes: %d", l)
		return Pattern{}, ErrInvalidPattern
	}

	var (
		typedOps        []op
		stack, maxstack int
		tailLen         int
		pushMSeen       bool
		vars            []string
	)
	for i := 0; i < l; i += 2 {
		op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]}
		switch op.code {
		case utilities.OpNop:
			continue
		case utilities.OpPush:
			if pushMSeen {
				tailLen++
			}
			stack++
		case utilities.OpPushM:
			if pushMSeen {
				grpclog.Infof("pushM appears twice")
				return Pattern{}, ErrInvalidPattern
			}
			pushMSeen = true
			stack++
		case utilities.OpLitPush:
			if op.operand < 0 || len(pool) <= op.operand {
				grpclog.Infof("negative literal index: %d", op.operand)
				return Pattern{}, ErrInvalidPattern
			}
			if pushMSeen {
				tailLen++
			}
			stack++
		case utilities.OpConcatN:
			if op.operand <= 0 {
				grpclog.Infof("negative concat size: %d", op.operand)
				return Pattern{}, ErrInvalidPattern
			}
			stack -= op.operand
			if stack < 0 {
				grpclog.Info("stack underflow")
				return Pattern{}, ErrInvalidPattern
			}
			stack++
		case utilities.OpCapture:
			if op.operand < 0 || len(pool) <= op.operand {
				grpclog.Infof("variable name index out of bound: %d", op.operand)
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
v := pool[op.operand]
|
||||||
|
op.operand = len(vars)
|
||||||
|
vars = append(vars, v)
|
||||||
|
stack--
|
||||||
|
if stack < 0 {
|
||||||
|
grpclog.Infof("stack underflow")
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
default:
|
||||||
|
grpclog.Infof("invalid opcode: %d", op.code)
|
||||||
|
return Pattern{}, ErrInvalidPattern
|
||||||
|
}
|
||||||
|
|
||||||
|
if maxstack < stack {
|
||||||
|
maxstack = stack
|
||||||
|
}
|
||||||
|
typedOps = append(typedOps, op)
|
||||||
|
}
|
||||||
|
return Pattern{
|
||||||
|
ops: typedOps,
|
||||||
|
pool: pool,
|
||||||
|
vars: vars,
|
||||||
|
stacksize: maxstack,
|
||||||
|
tailLen: tailLen,
|
||||||
|
verb: verb,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization.
|
||||||
|
func MustPattern(p Pattern, err error) Pattern {
|
||||||
|
if err != nil {
|
||||||
|
grpclog.Fatalf("Pattern initialization failed: %v", err)
|
||||||
|
}
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
|
// MatchAndEscape examines components to determine if they match to a Pattern.
|
||||||
|
// MatchAndEscape will return an error if no Patterns matched or if a pattern
|
||||||
|
// matched but contained malformed escape sequences. If successful, the function
|
||||||
|
// returns a mapping from field paths to their captured values.
|
||||||
|
func (p Pattern) MatchAndEscape(components []string, verb string, unescapingMode UnescapingMode) (map[string]string, error) {
|
||||||
|
if p.verb != verb {
|
||||||
|
if p.verb != "" {
|
||||||
|
return nil, ErrNotMatch
|
||||||
|
}
|
||||||
|
if len(components) == 0 {
|
||||||
|
components = []string{":" + verb}
|
||||||
|
} else {
|
||||||
|
components = append([]string{}, components...)
|
||||||
|
components[len(components)-1] += ":" + verb
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var pos int
|
||||||
|
stack := make([]string, 0, p.stacksize)
|
||||||
|
captured := make([]string, len(p.vars))
|
||||||
|
l := len(components)
|
||||||
|
for _, op := range p.ops {
|
||||||
|
var err error
|
||||||
|
|
||||||
|
switch op.code {
|
||||||
|
case utilities.OpNop:
|
||||||
|
continue
|
||||||
|
case utilities.OpPush, utilities.OpLitPush:
|
||||||
|
if pos >= l {
|
||||||
|
return nil, ErrNotMatch
|
||||||
|
}
|
||||||
|
c := components[pos]
|
||||||
|
if op.code == utilities.OpLitPush {
|
||||||
|
if lit := p.pool[op.operand]; c != lit {
|
||||||
|
return nil, ErrNotMatch
|
||||||
|
}
|
||||||
|
} else if op.code == utilities.OpPush {
|
||||||
|
if c, err = unescape(c, unescapingMode, false); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
}
|
||||||
|
stack = append(stack, c)
|
||||||
|
pos++
|
||||||
|
case utilities.OpPushM:
|
||||||
|
end := len(components)
|
||||||
|
if end < pos+p.tailLen {
|
||||||
|
return nil, ErrNotMatch
|
||||||
|
}
|
||||||
|
end -= p.tailLen
|
||||||
|
c := strings.Join(components[pos:end], "/")
|
||||||
|
if c, err = unescape(c, unescapingMode, true); err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
stack = append(stack, c)
|
||||||
|
pos = end
|
||||||
|
case utilities.OpConcatN:
|
||||||
|
n := op.operand
|
||||||
|
l := len(stack) - n
|
||||||
|
stack = append(stack[:l], strings.Join(stack[l:], "/"))
|
||||||
|
case utilities.OpCapture:
|
||||||
|
n := len(stack) - 1
|
||||||
|
captured[op.operand] = stack[n]
|
||||||
|
stack = stack[:n]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
if pos < l {
|
||||||
|
return nil, ErrNotMatch
|
||||||
|
}
|
||||||
|
bindings := make(map[string]string)
|
||||||
|
for i, val := range captured {
|
||||||
|
bindings[p.vars[i]] = val
|
||||||
|
}
|
||||||
|
return bindings, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// MatchAndEscape examines components to determine if they match to a Pattern.
|
||||||
|
// It will never perform per-component unescaping (see: UnescapingModeLegacy).
|
||||||
|
// MatchAndEscape will return an error if no Patterns matched. If successful,
|
||||||
|
// the function returns a mapping from field paths to their captured values.
|
||||||
|
//
|
||||||
|
// Deprecated: Use MatchAndEscape.
|
||||||
|
func (p Pattern) Match(components []string, verb string) (map[string]string, error) {
|
||||||
|
return p.MatchAndEscape(components, verb, UnescapingModeDefault)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Verb returns the verb part of the Pattern.
|
||||||
|
func (p Pattern) Verb() string { return p.verb }
|
||||||
|
|
||||||
|
func (p Pattern) String() string {
|
||||||
|
var stack []string
|
||||||
|
for _, op := range p.ops {
|
||||||
|
switch op.code {
|
||||||
|
case utilities.OpNop:
|
||||||
|
continue
|
||||||
|
case utilities.OpPush:
|
||||||
|
stack = append(stack, "*")
|
||||||
|
case utilities.OpLitPush:
|
||||||
|
stack = append(stack, p.pool[op.operand])
|
||||||
|
case utilities.OpPushM:
|
||||||
|
stack = append(stack, "**")
|
||||||
|
case utilities.OpConcatN:
|
||||||
|
n := op.operand
|
||||||
|
l := len(stack) - n
|
||||||
|
stack = append(stack[:l], strings.Join(stack[l:], "/"))
|
||||||
|
case utilities.OpCapture:
|
||||||
|
n := len(stack) - 1
|
||||||
|
stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
segs := strings.Join(stack, "/")
|
||||||
|
if p.verb != "" {
|
||||||
|
return fmt.Sprintf("/%s:%s", segs, p.verb)
|
||||||
|
}
|
||||||
|
return "/" + segs
|
||||||
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* The following code is adopted and modified from Go's standard library
|
||||||
|
* and carries the attached license.
|
||||||
|
*
|
||||||
|
* Copyright 2009 The Go Authors. All rights reserved.
|
||||||
|
* Use of this source code is governed by a BSD-style
|
||||||
|
* license that can be found in the LICENSE file.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// ishex returns whether or not the given byte is a valid hex character
|
||||||
|
func ishex(c byte) bool {
|
||||||
|
switch {
|
||||||
|
case '0' <= c && c <= '9':
|
||||||
|
return true
|
||||||
|
case 'a' <= c && c <= 'f':
|
||||||
|
return true
|
||||||
|
case 'A' <= c && c <= 'F':
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
func isRFC6570Reserved(c byte) bool {
|
||||||
|
switch c {
|
||||||
|
case '!', '#', '$', '&', '\'', '(', ')', '*',
|
||||||
|
'+', ',', '/', ':', ';', '=', '?', '@', '[', ']':
|
||||||
|
return true
|
||||||
|
default:
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// unhex converts a hex point to the bit representation
|
||||||
|
func unhex(c byte) byte {
|
||||||
|
switch {
|
||||||
|
case '0' <= c && c <= '9':
|
||||||
|
return c - '0'
|
||||||
|
case 'a' <= c && c <= 'f':
|
||||||
|
return c - 'a' + 10
|
||||||
|
case 'A' <= c && c <= 'F':
|
||||||
|
return c - 'A' + 10
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
|
// shouldUnescapeWithMode returns true if the character is escapable with the
|
||||||
|
// given mode
|
||||||
|
func shouldUnescapeWithMode(c byte, mode UnescapingMode) bool {
|
||||||
|
switch mode {
|
||||||
|
case UnescapingModeAllExceptReserved:
|
||||||
|
if isRFC6570Reserved(c) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
case UnescapingModeAllExceptSlash:
|
||||||
|
if c == '/' {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
case UnescapingModeAllCharacters:
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
// unescape unescapes a path string using the provided mode
|
||||||
|
func unescape(s string, mode UnescapingMode, multisegment bool) (string, error) {
|
||||||
|
// TODO(v3): remove UnescapingModeLegacy
|
||||||
|
if mode == UnescapingModeLegacy {
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
if !multisegment {
|
||||||
|
mode = UnescapingModeAllCharacters
|
||||||
|
}
|
||||||
|
|
||||||
|
// Count %, check that they're well-formed.
|
||||||
|
n := 0
|
||||||
|
for i := 0; i < len(s); {
|
||||||
|
if s[i] == '%' {
|
||||||
|
n++
|
||||||
|
if i+2 >= len(s) || !ishex(s[i+1]) || !ishex(s[i+2]) {
|
||||||
|
s = s[i:]
|
||||||
|
if len(s) > 3 {
|
||||||
|
s = s[:3]
|
||||||
|
}
|
||||||
|
|
||||||
|
return "", MalformedSequenceError(s)
|
||||||
|
}
|
||||||
|
i += 3
|
||||||
|
} else {
|
||||||
|
i++
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if n == 0 {
|
||||||
|
return s, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
var t strings.Builder
|
||||||
|
t.Grow(len(s))
|
||||||
|
for i := 0; i < len(s); i++ {
|
||||||
|
switch s[i] {
|
||||||
|
case '%':
|
||||||
|
c := unhex(s[i+1])<<4 | unhex(s[i+2])
|
||||||
|
if shouldUnescapeWithMode(c, mode) {
|
||||||
|
t.WriteByte(c)
|
||||||
|
i += 2
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
fallthrough
|
||||||
|
default:
|
||||||
|
t.WriteByte(s[i])
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
return t.String(), nil
|
||||||
|
}
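Illustrative sketch (hand-written, not part of the vendored diff): how a compiled template such as "/v1/{name=messages/*}" drives Pattern matching. The opcode sequence and pool below are written by hand for illustration; protoc-gen-grpc-gateway normally emits them.

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)

func main() {
	pool := []string{"v1", "messages", "name"}
	ops := []int{
		int(utilities.OpLitPush), 0, // literal "v1"
		int(utilities.OpLitPush), 1, // literal "messages"
		int(utilities.OpPush), 0, // one wildcard segment
		int(utilities.OpConcatN), 2, // join "messages" and the wildcard with "/"
		int(utilities.OpCapture), 2, // bind the joined value to "name"
	}
	pat := runtime.MustPattern(runtime.NewPattern(1, ops, pool, ""))

	bindings, err := pat.MatchAndEscape([]string{"v1", "messages", "abc"}, "", runtime.UnescapingModeDefault)
	fmt.Println(bindings, err) // map[name:messages/abc] <nil>
}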
80 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/proto2_convert.go generated vendored Normal file
@@ -0,0 +1,80 @@
package runtime

import (
	"google.golang.org/protobuf/proto"
)

// StringP returns a pointer to a string whose pointee is same as the given string value.
func StringP(val string) (*string, error) {
	return proto.String(val), nil
}

// BoolP parses the given string representation of a boolean value,
// and returns a pointer to a bool whose value is same as the parsed value.
func BoolP(val string) (*bool, error) {
	b, err := Bool(val)
	if err != nil {
		return nil, err
	}
	return proto.Bool(b), nil
}

// Float64P parses the given string representation of a floating point number,
// and returns a pointer to a float64 whose value is same as the parsed number.
func Float64P(val string) (*float64, error) {
	f, err := Float64(val)
	if err != nil {
		return nil, err
	}
	return proto.Float64(f), nil
}

// Float32P parses the given string representation of a floating point number,
// and returns a pointer to a float32 whose value is same as the parsed number.
func Float32P(val string) (*float32, error) {
	f, err := Float32(val)
	if err != nil {
		return nil, err
	}
	return proto.Float32(f), nil
}

// Int64P parses the given string representation of an integer
// and returns a pointer to a int64 whose value is same as the parsed integer.
func Int64P(val string) (*int64, error) {
	i, err := Int64(val)
	if err != nil {
		return nil, err
	}
	return proto.Int64(i), nil
}

// Int32P parses the given string representation of an integer
// and returns a pointer to a int32 whose value is same as the parsed integer.
func Int32P(val string) (*int32, error) {
	i, err := Int32(val)
	if err != nil {
		return nil, err
	}
	return proto.Int32(i), err
}

// Uint64P parses the given string representation of an integer
// and returns a pointer to a uint64 whose value is same as the parsed integer.
func Uint64P(val string) (*uint64, error) {
	i, err := Uint64(val)
	if err != nil {
		return nil, err
	}
	return proto.Uint64(i), err
}

// Uint32P parses the given string representation of an integer
// and returns a pointer to a uint32 whose value is same as the parsed integer.
func Uint32P(val string) (*uint32, error) {
	i, err := Uint32(val)
	if err != nil {
		return nil, err
	}
	return proto.Uint32(i), err
}
329 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go generated vendored Normal file
@@ -0,0 +1,329 @@
package runtime

import (
	"encoding/base64"
	"errors"
	"fmt"
	"net/url"
	"regexp"
	"strconv"
	"strings"
	"time"

	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
	"google.golang.org/genproto/protobuf/field_mask"
	"google.golang.org/grpc/grpclog"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/reflect/protoregistry"
	"google.golang.org/protobuf/types/known/durationpb"
	"google.golang.org/protobuf/types/known/timestamppb"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)

var currentQueryParser QueryParameterParser = &defaultQueryParser{}

// QueryParameterParser defines interface for all query parameter parsers
type QueryParameterParser interface {
	Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error
}

// PopulateQueryParameters parses query parameters
// into "msg" using current query parser
func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
	return currentQueryParser.Parse(msg, values, filter)
}

type defaultQueryParser struct{}

// Parse populates "values" into "msg".
// A value is ignored if its key starts with one of the elements in "filter".
func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
	for key, values := range values {
		match := valuesKeyRegexp.FindStringSubmatch(key)
		if len(match) == 3 {
			key = match[1]
			values = append([]string{match[2]}, values...)
		}
		fieldPath := strings.Split(key, ".")
		if filter.HasCommonPrefix(fieldPath) {
			continue
		}
		if err := populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, values); err != nil {
			return err
		}
	}
	return nil
}

// PopulateFieldFromPath sets a value in a nested Protobuf structure.
func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error {
	fieldPath := strings.Split(fieldPathString, ".")
	return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value})
}

func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error {
	if len(fieldPath) < 1 {
		return errors.New("no field path")
	}
	if len(values) < 1 {
		return errors.New("no value provided")
	}

	var fieldDescriptor protoreflect.FieldDescriptor
	for i, fieldName := range fieldPath {
		fields := msgValue.Descriptor().Fields()

		// Get field by name
		fieldDescriptor = fields.ByName(protoreflect.Name(fieldName))
		if fieldDescriptor == nil {
			fieldDescriptor = fields.ByJSONName(fieldName)
			if fieldDescriptor == nil {
				// We're not returning an error here because this could just be
				// an extra query parameter that isn't part of the request.
				grpclog.Infof("field not found in %q: %q", msgValue.Descriptor().FullName(), strings.Join(fieldPath, "."))
				return nil
			}
		}

		// If this is the last element, we're done
		if i == len(fieldPath)-1 {
			break
		}

		// Only singular message fields are allowed
		if fieldDescriptor.Message() == nil || fieldDescriptor.Cardinality() == protoreflect.Repeated {
			return fmt.Errorf("invalid path: %q is not a message", fieldName)
		}

		// Get the nested message
		msgValue = msgValue.Mutable(fieldDescriptor).Message()
	}

	// Check if oneof already set
	if of := fieldDescriptor.ContainingOneof(); of != nil {
		if f := msgValue.WhichOneof(of); f != nil {
			return fmt.Errorf("field already set for oneof %q", of.FullName().Name())
		}
	}

	switch {
	case fieldDescriptor.IsList():
		return populateRepeatedField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).List(), values)
	case fieldDescriptor.IsMap():
		return populateMapField(fieldDescriptor, msgValue.Mutable(fieldDescriptor).Map(), values)
	}

	if len(values) > 1 {
		return fmt.Errorf("too many values for field %q: %s", fieldDescriptor.FullName().Name(), strings.Join(values, ", "))
	}

	return populateField(fieldDescriptor, msgValue, values[0])
}

func populateField(fieldDescriptor protoreflect.FieldDescriptor, msgValue protoreflect.Message, value string) error {
	v, err := parseField(fieldDescriptor, value)
	if err != nil {
		return fmt.Errorf("parsing field %q: %w", fieldDescriptor.FullName().Name(), err)
	}

	msgValue.Set(fieldDescriptor, v)
	return nil
}

func populateRepeatedField(fieldDescriptor protoreflect.FieldDescriptor, list protoreflect.List, values []string) error {
	for _, value := range values {
		v, err := parseField(fieldDescriptor, value)
		if err != nil {
			return fmt.Errorf("parsing list %q: %w", fieldDescriptor.FullName().Name(), err)
		}
		list.Append(v)
	}

	return nil
}

func populateMapField(fieldDescriptor protoreflect.FieldDescriptor, mp protoreflect.Map, values []string) error {
	if len(values) != 2 {
		return fmt.Errorf("more than one value provided for key %q in map %q", values[0], fieldDescriptor.FullName())
	}

	key, err := parseField(fieldDescriptor.MapKey(), values[0])
	if err != nil {
		return fmt.Errorf("parsing map key %q: %w", fieldDescriptor.FullName().Name(), err)
	}

	value, err := parseField(fieldDescriptor.MapValue(), values[1])
	if err != nil {
		return fmt.Errorf("parsing map value %q: %w", fieldDescriptor.FullName().Name(), err)
	}

	mp.Set(key.MapKey(), value)

	return nil
}

func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (protoreflect.Value, error) {
	switch fieldDescriptor.Kind() {
	case protoreflect.BoolKind:
		v, err := strconv.ParseBool(value)
		if err != nil {
			return protoreflect.Value{}, err
		}
		return protoreflect.ValueOfBool(v), nil
	case protoreflect.EnumKind:
		enum, err := protoregistry.GlobalTypes.FindEnumByName(fieldDescriptor.Enum().FullName())
		switch {
		case errors.Is(err, protoregistry.NotFound):
			return protoreflect.Value{}, fmt.Errorf("enum %q is not registered", fieldDescriptor.Enum().FullName())
		case err != nil:
			return protoreflect.Value{}, fmt.Errorf("failed to look up enum: %w", err)
		}
		// Look for enum by name
		v := enum.Descriptor().Values().ByName(protoreflect.Name(value))
		if v == nil {
			i, err := strconv.Atoi(value)
			if err != nil {
				return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
			}
			// Look for enum by number
			v = enum.Descriptor().Values().ByNumber(protoreflect.EnumNumber(i))
			if v == nil {
				return protoreflect.Value{}, fmt.Errorf("%q is not a valid value", value)
			}
		}
		return protoreflect.ValueOfEnum(v.Number()), nil
	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
		v, err := strconv.ParseInt(value, 10, 32)
		if err != nil {
			return protoreflect.Value{}, err
		}
		return protoreflect.ValueOfInt32(int32(v)), nil
	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
		v, err := strconv.ParseInt(value, 10, 64)
		if err != nil {
			return protoreflect.Value{}, err
		}
		return protoreflect.ValueOfInt64(v), nil
	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
		v, err := strconv.ParseUint(value, 10, 32)
		if err != nil {
			return protoreflect.Value{}, err
		}
		return protoreflect.ValueOfUint32(uint32(v)), nil
	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
		v, err := strconv.ParseUint(value, 10, 64)
		if err != nil {
			return protoreflect.Value{}, err
		}
		return protoreflect.ValueOfUint64(v), nil
	case protoreflect.FloatKind:
		v, err := strconv.ParseFloat(value, 32)
		if err != nil {
			return protoreflect.Value{}, err
		}
		return protoreflect.ValueOfFloat32(float32(v)), nil
	case protoreflect.DoubleKind:
		v, err := strconv.ParseFloat(value, 64)
		if err != nil {
			return protoreflect.Value{}, err
		}
		return protoreflect.ValueOfFloat64(v), nil
	case protoreflect.StringKind:
		return protoreflect.ValueOfString(value), nil
	case protoreflect.BytesKind:
		v, err := base64.URLEncoding.DecodeString(value)
		if err != nil {
			return protoreflect.Value{}, err
		}
		return protoreflect.ValueOfBytes(v), nil
	case protoreflect.MessageKind, protoreflect.GroupKind:
		return parseMessage(fieldDescriptor.Message(), value)
	default:
		panic(fmt.Sprintf("unknown field kind: %v", fieldDescriptor.Kind()))
	}
}

func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (protoreflect.Value, error) {
	var msg proto.Message
	switch msgDescriptor.FullName() {
	case "google.protobuf.Timestamp":
		if value == "null" {
			break
		}
		t, err := time.Parse(time.RFC3339Nano, value)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = timestamppb.New(t)
	case "google.protobuf.Duration":
		if value == "null" {
			break
		}
		d, err := time.ParseDuration(value)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = durationpb.New(d)
	case "google.protobuf.DoubleValue":
		v, err := strconv.ParseFloat(value, 64)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = &wrapperspb.DoubleValue{Value: v}
	case "google.protobuf.FloatValue":
		v, err := strconv.ParseFloat(value, 32)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = &wrapperspb.FloatValue{Value: float32(v)}
	case "google.protobuf.Int64Value":
		v, err := strconv.ParseInt(value, 10, 64)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = &wrapperspb.Int64Value{Value: v}
	case "google.protobuf.Int32Value":
		v, err := strconv.ParseInt(value, 10, 32)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = &wrapperspb.Int32Value{Value: int32(v)}
	case "google.protobuf.UInt64Value":
		v, err := strconv.ParseUint(value, 10, 64)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = &wrapperspb.UInt64Value{Value: v}
	case "google.protobuf.UInt32Value":
		v, err := strconv.ParseUint(value, 10, 32)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = &wrapperspb.UInt32Value{Value: uint32(v)}
	case "google.protobuf.BoolValue":
		v, err := strconv.ParseBool(value)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = &wrapperspb.BoolValue{Value: v}
	case "google.protobuf.StringValue":
		msg = &wrapperspb.StringValue{Value: value}
	case "google.protobuf.BytesValue":
		v, err := base64.URLEncoding.DecodeString(value)
		if err != nil {
			return protoreflect.Value{}, err
		}
		msg = &wrapperspb.BytesValue{Value: v}
	case "google.protobuf.FieldMask":
		fm := &field_mask.FieldMask{}
		fm.Paths = append(fm.Paths, strings.Split(value, ",")...)
		msg = fm
	default:
		return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName()))
	}

	return protoreflect.ValueOfMessage(msg.ProtoReflect()), nil
}
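Illustrative sketch (hand-written, not part of the vendored diff): populating a protobuf message from URL query parameters with the parser above. timestamppb.Timestamp is used here only because it is a readily available message with simple scalar fields.

package main

import (
	"fmt"
	"net/url"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	msg := &timestamppb.Timestamp{}
	values := url.Values{"seconds": {"1700000000"}, "nanos": {"500"}}
	// An empty DoubleArray means no field paths are filtered out.
	if err := runtime.PopulateQueryParameters(msg, values, utilities.NewDoubleArray(nil)); err != nil {
		panic(err)
	}
	fmt.Println(msg.GetSeconds(), msg.GetNanos()) // 1700000000 500
}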
27 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel generated vendored Normal file
@@ -0,0 +1,27 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")

package(default_visibility = ["//visibility:public"])

go_library(
    name = "utilities",
    srcs = [
        "doc.go",
        "pattern.go",
        "readerfactory.go",
        "trie.go",
    ],
    importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities",
)

go_test(
    name = "utilities_test",
    size = "small",
    srcs = ["trie_test.go"],
    deps = [":utilities"],
)

alias(
    name = "go_default_library",
    actual = ":utilities",
    visibility = ["//visibility:public"],
)
@@ -0,0 +1,2 @@
// Package utilities provides members for internal use in grpc-gateway.
package utilities
22 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/pattern.go generated vendored Normal file
@@ -0,0 +1,22 @@
package utilities

// An OpCode is a opcode of compiled path patterns.
type OpCode int

// These constants are the valid values of OpCode.
const (
	// OpNop does nothing
	OpNop = OpCode(iota)
	// OpPush pushes a component to stack
	OpPush
	// OpLitPush pushes a component to stack if it matches to the literal
	OpLitPush
	// OpPushM concatenates the remaining components and pushes it to stack
	OpPushM
	// OpConcatN pops N items from stack, concatenates them and pushes it back to stack
	OpConcatN
	// OpCapture pops an item and binds it to the variable
	OpCapture
	// OpEnd is the least positive invalid opcode.
	OpEnd
)
20 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/readerfactory.go generated vendored Normal file
@@ -0,0 +1,20 @@
package utilities

import (
	"bytes"
	"io"
	"io/ioutil"
)

// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
// at the start of the stream
func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
	b, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}

	return func() io.Reader {
		return bytes.NewReader(b)
	}, nil
}
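Illustrative sketch (hand-written, not part of the vendored diff): IOReaderFactory buffers a stream once and hands back a factory of fresh readers over the same bytes, which is useful when a request body has to be consumed more than once.

package main

import (
	"fmt"
	"io"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)

func main() {
	factory, err := utilities.IOReaderFactory(strings.NewReader("hello"))
	if err != nil {
		panic(err)
	}
	for i := 0; i < 2; i++ {
		b, _ := io.ReadAll(factory()) // each call starts at the beginning of the stream
		fmt.Println(string(b))
	}
}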
174 vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/trie.go generated vendored Normal file
@@ -0,0 +1,174 @@
package utilities

import (
	"sort"
)

// DoubleArray is a Double Array implementation of trie on sequences of strings.
type DoubleArray struct {
	// Encoding keeps an encoding from string to int
	Encoding map[string]int
	// Base is the base array of Double Array
	Base []int
	// Check is the check array of Double Array
	Check []int
}

// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
func NewDoubleArray(seqs [][]string) *DoubleArray {
	da := &DoubleArray{Encoding: make(map[string]int)}
	if len(seqs) == 0 {
		return da
	}

	encoded := registerTokens(da, seqs)
	sort.Sort(byLex(encoded))

	root := node{row: -1, col: -1, left: 0, right: len(encoded)}
	addSeqs(da, encoded, 0, root)

	for i := len(da.Base); i > 0; i-- {
		if da.Check[i-1] != 0 {
			da.Base = da.Base[:i]
			da.Check = da.Check[:i]
			break
		}
	}
	return da
}

func registerTokens(da *DoubleArray, seqs [][]string) [][]int {
	var result [][]int
	for _, seq := range seqs {
		var encoded []int
		for _, token := range seq {
			if _, ok := da.Encoding[token]; !ok {
				da.Encoding[token] = len(da.Encoding)
			}
			encoded = append(encoded, da.Encoding[token])
		}
		result = append(result, encoded)
	}
	for i := range result {
		result[i] = append(result[i], len(da.Encoding))
	}
	return result
}

type node struct {
	row, col    int
	left, right int
}

func (n node) value(seqs [][]int) int {
	return seqs[n.row][n.col]
}

func (n node) children(seqs [][]int) []*node {
	var result []*node
	lastVal := int(-1)
	last := new(node)
	for i := n.left; i < n.right; i++ {
		if lastVal == seqs[i][n.col+1] {
			continue
		}
		last.right = i
		last = &node{
			row:  i,
			col:  n.col + 1,
			left: i,
		}
		result = append(result, last)
	}
	last.right = n.right
	return result
}

func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) {
	ensureSize(da, pos)

	children := n.children(seqs)
	var i int
	for i = 1; ; i++ {
		ok := func() bool {
			for _, child := range children {
				code := child.value(seqs)
				j := i + code
				ensureSize(da, j)
				if da.Check[j] != 0 {
					return false
				}
			}
			return true
		}()
		if ok {
			break
		}
	}
	da.Base[pos] = i
	for _, child := range children {
		code := child.value(seqs)
		j := i + code
		da.Check[j] = pos + 1
	}
	terminator := len(da.Encoding)
	for _, child := range children {
		code := child.value(seqs)
		if code == terminator {
			continue
		}
		j := i + code
		addSeqs(da, seqs, j, *child)
	}
}

func ensureSize(da *DoubleArray, i int) {
	for i >= len(da.Base) {
		da.Base = append(da.Base, make([]int, len(da.Base)+1)...)
		da.Check = append(da.Check, make([]int, len(da.Check)+1)...)
	}
}

type byLex [][]int

func (l byLex) Len() int      { return len(l) }
func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] }
func (l byLex) Less(i, j int) bool {
	si := l[i]
	sj := l[j]
	var k int
	for k = 0; k < len(si) && k < len(sj); k++ {
		if si[k] < sj[k] {
			return true
		}
		if si[k] > sj[k] {
			return false
		}
	}
	return k < len(sj)
}

// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence.
func (da *DoubleArray) HasCommonPrefix(seq []string) bool {
	if len(da.Base) == 0 {
		return false
	}

	var i int
	for _, t := range seq {
		code, ok := da.Encoding[t]
		if !ok {
			break
		}
		j := da.Base[i] + code
		if len(da.Check) <= j || da.Check[j] != i+1 {
			break
		}
		i = j
	}
	j := da.Base[i] + len(da.Encoding)
	if len(da.Check) <= j || da.Check[j] != i+1 {
		return false
	}
	return true
}
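Illustrative sketch (hand-written, not part of the vendored diff): the DoubleArray trie above is what the query parameter filter is built from; HasCommonPrefix reports whether any registered sequence is a prefix of the given field path. The field paths below are made up for the example.

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
)

func main() {
	// Field paths that are already bound from the URL path, so query parsing skips them.
	filter := utilities.NewDoubleArray([][]string{{"book", "name"}, {"parent"}})
	fmt.Println(filter.HasCommonPrefix([]string{"book", "name", "sub"})) // true
	fmt.Println(filter.HasCommonPrefix([]string{"book", "author"}))      // false
}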
@@ -3,6 +3,7 @@ package assert
 import (
 	"fmt"
 	"reflect"
+	"time"
 )

 type CompareType int
@@ -30,6 +31,8 @@ var (
 	float64Type = reflect.TypeOf(float64(1))

 	stringType = reflect.TypeOf("")
+
+	timeType = reflect.TypeOf(time.Time{})
 )

 func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
@@ -299,6 +302,27 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 				return compareLess, true
 			}
 		}
+	// Check for known struct types we can check for compare results.
+	case reflect.Struct:
+		{
+			// All structs enter here. We're not interested in most types.
+			if !canConvert(obj1Value, timeType) {
+				break
+			}
+
+			// time.Time can compared!
+			timeObj1, ok := obj1.(time.Time)
+			if !ok {
+				timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
+			}
+
+			timeObj2, ok := obj2.(time.Time)
+			if !ok {
+				timeObj2 = obj2Value.Convert(timeType).Interface().(time.Time)
+			}
+
+			return compare(timeObj1.UnixNano(), timeObj2.UnixNano(), reflect.Int64)
+		}
 	}

 	return compareEqual, false
@@ -310,7 +334,10 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
 // assert.Greater(t, float64(2), float64(1))
 // assert.Greater(t, "b", "a")
 func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
-	return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return compareTwoValues(t, e1, e2, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
 }

 // GreaterOrEqual asserts that the first element is greater than or equal to the second
@@ -320,7 +347,10 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 // assert.GreaterOrEqual(t, "b", "a")
 // assert.GreaterOrEqual(t, "b", "b")
 func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
-	return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return compareTwoValues(t, e1, e2, []CompareType{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
 }

 // Less asserts that the first element is less than the second
@@ -329,7 +359,10 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 // assert.Less(t, float64(1), float64(2))
 // assert.Less(t, "a", "b")
 func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
-	return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return compareTwoValues(t, e1, e2, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
 }

 // LessOrEqual asserts that the first element is less than or equal to the second
@@ -339,7 +372,10 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 // assert.LessOrEqual(t, "a", "b")
 // assert.LessOrEqual(t, "b", "b")
 func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
-	return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return compareTwoValues(t, e1, e2, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
 }

 // Positive asserts that the specified element is positive
@@ -347,8 +383,11 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{}) bool {
 // assert.Positive(t, 1)
 // assert.Positive(t, 1.23)
 func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
 	zero := reflect.Zero(reflect.TypeOf(e))
-	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs)
+	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareGreater}, "\"%v\" is not positive", msgAndArgs...)
 }

 // Negative asserts that the specified element is negative
@@ -356,8 +395,11 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
 // assert.Negative(t, -1)
 // assert.Negative(t, -1.23)
 func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
 	zero := reflect.Zero(reflect.TypeOf(e))
-	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs)
+	return compareTwoValues(t, e, zero.Interface(), []CompareType{compareLess}, "\"%v\" is not negative", msgAndArgs...)
 }

 func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
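Illustrative sketch (hand-written, not part of the vendored diff): with the struct case added to compare above, the ordering assertions accept time.Time values directly on Go 1.17 and newer (the legacy canConvert in the file below reports false, so older toolchains still reject them).

package example

import (
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestDeadlineOrdering(t *testing.T) {
	start := time.Now()
	deadline := start.Add(5 * time.Second)
	assert.Greater(t, deadline, start)
	assert.LessOrEqual(t, start, deadline)
}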
16 vendor/github.com/stretchr/testify/assert/assertion_compare_can_convert.go generated vendored Normal file
@@ -0,0 +1,16 @@
//go:build go1.17
// +build go1.17

// TODO: once support for Go 1.16 is dropped, this file can be
// merged/removed with assertion_compare_go1.17_test.go and
// assertion_compare_legacy.go

package assert

import "reflect"

// Wrapper around reflect.Value.CanConvert, for compatability
// reasons.
func canConvert(value reflect.Value, to reflect.Type) bool {
	return value.CanConvert(to)
}
16 vendor/github.com/stretchr/testify/assert/assertion_compare_legacy.go generated vendored Normal file
@@ -0,0 +1,16 @@
//go:build !go1.17
// +build !go1.17

// TODO: once support for Go 1.16 is dropped, this file can be
// merged/removed with assertion_compare_go1.17_test.go and
// assertion_compare_can_convert.go

package assert

import "reflect"

// Older versions of Go does not have the reflect.Value.CanConvert
// method.
func canConvert(value reflect.Value, to reflect.Type) bool {
	return false
}
@@ -123,6 +123,18 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...interface{}) bool {
 	return ErrorAs(t, err, target, append([]interface{}{msg}, args...)...)
 }

+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
+func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) bool {
+	if h, ok := t.(tHelper); ok {
+		h.Helper()
+	}
+	return ErrorContains(t, theError, contains, append([]interface{}{msg}, args...)...)
+}
+
 // ErrorIsf asserts that at least one of the errors in err's chain matches target.
 // This is a wrapper for errors.Is.
 func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface{}) bool {
@@ -222,6 +222,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ...interface{}) bool {
 	return ErrorAsf(a.t, err, target, msg, args...)
 }

+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// a.ErrorContains(err, expectedErrorSubString)
+func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return ErrorContains(a.t, theError, contains, msgAndArgs...)
+}
+
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted")
+func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) bool {
+	if h, ok := a.t.(tHelper); ok {
+		h.Helper()
+	}
+	return ErrorContainsf(a.t, theError, contains, msg, args...)
+}
+
 // ErrorIs asserts that at least one of the errors in err's chain matches target.
 // This is a wrapper for errors.Is.
 func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) bool {
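Illustrative sketch (hand-written, not part of the vendored diff): the new ErrorContains helpers in use; the error text here is invented for the example.

package example

import (
	"errors"
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestOpenConfig(t *testing.T) {
	err := errors.New("open config.yaml: permission denied")
	assert.ErrorContains(t, err, "permission denied")
	assert.ErrorContainsf(t, err, "permission denied", "unexpected error: %v", err)
}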
@@ -50,7 +50,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []CompareType, failMessage string, msgAndArgs ...interface{}) bool {
 // assert.IsIncreasing(t, []float{1, 2})
 // assert.IsIncreasing(t, []string{"a", "b"})
 func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs)
+	return isOrdered(t, object, []CompareType{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...)
 }

 // IsNonIncreasing asserts that the collection is not increasing
@@ -59,7 +59,7 @@ func IsIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
 // assert.IsNonIncreasing(t, []float{2, 1})
 // assert.IsNonIncreasing(t, []string{"b", "a"})
 func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs)
+	return isOrdered(t, object, []CompareType{compareEqual, compareGreater}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...)
 }

 // IsDecreasing asserts that the collection is decreasing
@@ -68,7 +68,7 @@ func IsNonIncreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
 // assert.IsDecreasing(t, []float{2, 1})
 // assert.IsDecreasing(t, []string{"b", "a"})
 func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs)
+	return isOrdered(t, object, []CompareType{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...)
 }

 // IsNonDecreasing asserts that the collection is not decreasing
@@ -77,5 +77,5 @@ func IsDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
 // assert.IsNonDecreasing(t, []float{1, 2})
 // assert.IsNonDecreasing(t, []string{"a", "b"})
 func IsNonDecreasing(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
-	return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs)
+	return isOrdered(t, object, []CompareType{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...)
 }
@ -718,10 +718,14 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte
|
||||||
// return (false, false) if impossible.
|
// return (false, false) if impossible.
|
||||||
// return (true, false) if element was not found.
|
// return (true, false) if element was not found.
|
||||||
// return (true, true) if element was found.
|
// return (true, true) if element was found.
|
||||||
func includeElement(list interface{}, element interface{}) (ok, found bool) {
|
func containsElement(list interface{}, element interface{}) (ok, found bool) {
|
||||||
|
|
||||||
listValue := reflect.ValueOf(list)
|
listValue := reflect.ValueOf(list)
|
||||||
listKind := reflect.TypeOf(list).Kind()
|
listType := reflect.TypeOf(list)
|
||||||
|
if listType == nil {
|
||||||
|
return false, false
|
||||||
|
}
|
||||||
|
listKind := listType.Kind()
|
||||||
defer func() {
|
defer func() {
|
||||||
if e := recover(); e != nil {
|
if e := recover(); e != nil {
|
||||||
ok = false
|
ok = false
|
||||||
|
@ -764,7 +768,7 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
|
||||||
h.Helper()
|
h.Helper()
|
||||||
}
|
}
|
||||||
|
|
||||||
ok, found := includeElement(s, contains)
|
ok, found := containsElement(s, contains)
|
||||||
if !ok {
|
if !ok {
|
||||||
return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
|
return Fail(t, fmt.Sprintf("%#v could not be applied builtin len()", s), msgAndArgs...)
|
||||||
}
|
}
|
||||||
|
@ -787,7 +791,7 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
|
||||||
h.Helper()
|
h.Helper()
|
||||||
}
|
}
|
||||||
|
|
||||||
ok, found := includeElement(s, contains)
|
ok, found := containsElement(s, contains)
|
||||||
if !ok {
|
if !ok {
|
||||||
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
|
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", s), msgAndArgs...)
|
||||||
}
|
}
|
||||||
|
@ -831,7 +835,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
|
||||||
|
|
||||||
for i := 0; i < subsetValue.Len(); i++ {
|
for i := 0; i < subsetValue.Len(); i++ {
|
||||||
element := subsetValue.Index(i).Interface()
|
element := subsetValue.Index(i).Interface()
|
||||||
ok, found := includeElement(list, element)
|
ok, found := containsElement(list, element)
|
||||||
if !ok {
|
if !ok {
|
||||||
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
|
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
|
||||||
}
|
}
|
||||||
|
@ -852,7 +856,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
|
||||||
h.Helper()
|
h.Helper()
|
||||||
}
|
}
|
||||||
if subset == nil {
|
if subset == nil {
|
||||||
return Fail(t, fmt.Sprintf("nil is the empty set which is a subset of every set"), msgAndArgs...)
|
return Fail(t, "nil is the empty set which is a subset of every set", msgAndArgs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
subsetValue := reflect.ValueOf(subset)
|
subsetValue := reflect.ValueOf(subset)
|
||||||
|
@ -875,7 +879,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
|
||||||
|
|
||||||
for i := 0; i < subsetValue.Len(); i++ {
|
for i := 0; i < subsetValue.Len(); i++ {
|
||||||
element := subsetValue.Index(i).Interface()
|
element := subsetValue.Index(i).Interface()
|
||||||
ok, found := includeElement(list, element)
|
ok, found := containsElement(list, element)
|
||||||
if !ok {
|
if !ok {
|
||||||
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
|
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...)
|
||||||
}
|
}
|
||||||
|
@ -1000,27 +1004,21 @@ func Condition(t TestingT, comp Comparison, msgAndArgs ...interface{}) bool {
|
||||||
type PanicTestFunc func()
|
type PanicTestFunc func()
|
||||||
|
|
||||||
// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
|
// didPanic returns true if the function passed to it panics. Otherwise, it returns false.
|
||||||
func didPanic(f PanicTestFunc) (bool, interface{}, string) {
|
func didPanic(f PanicTestFunc) (didPanic bool, message interface{}, stack string) {
|
||||||
|
didPanic = true
|
||||||
didPanic := false
|
|
||||||
var message interface{}
|
|
||||||
var stack string
|
|
||||||
func() {
|
|
||||||
|
|
||||||
defer func() {
|
|
||||||
if message = recover(); message != nil {
|
|
||||||
didPanic = true
|
|
||||||
stack = string(debug.Stack())
|
|
||||||
}
|
|
||||||
}()
|
|
||||||
|
|
||||||
// call the target function
|
|
||||||
f()
|
|
||||||
|
|
||||||
|
defer func() {
|
||||||
|
message = recover()
|
||||||
|
if didPanic {
|
||||||
|
stack = string(debug.Stack())
|
||||||
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
return didPanic, message, stack
|
// call the target function
|
||||||
|
f()
|
||||||
|
didPanic = false
|
||||||
|
|
||||||
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
// Panics asserts that the code inside the specified PanicTestFunc panics.
|
// Panics asserts that the code inside the specified PanicTestFunc panics.
|
||||||
|
@ -1161,11 +1159,15 @@ func InDelta(t TestingT, expected, actual interface{}, delta float64, msgAndArgs
|
||||||
bf, bok := toFloat(actual)
|
bf, bok := toFloat(actual)
|
||||||
|
|
||||||
if !aok || !bok {
|
if !aok || !bok {
|
||||||
return Fail(t, fmt.Sprintf("Parameters must be numerical"), msgAndArgs...)
|
return Fail(t, "Parameters must be numerical", msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
if math.IsNaN(af) && math.IsNaN(bf) {
|
||||||
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
if math.IsNaN(af) {
|
if math.IsNaN(af) {
|
||||||
return Fail(t, fmt.Sprintf("Expected must not be NaN"), msgAndArgs...)
|
return Fail(t, "Expected must not be NaN", msgAndArgs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
if math.IsNaN(bf) {
|
if math.IsNaN(bf) {
|
||||||
|
@ -1188,7 +1190,7 @@ func InDeltaSlice(t TestingT, expected, actual interface{}, delta float64, msgAn
|
||||||
if expected == nil || actual == nil ||
|
if expected == nil || actual == nil ||
|
||||||
reflect.TypeOf(actual).Kind() != reflect.Slice ||
|
reflect.TypeOf(actual).Kind() != reflect.Slice ||
|
||||||
reflect.TypeOf(expected).Kind() != reflect.Slice {
|
reflect.TypeOf(expected).Kind() != reflect.Slice {
|
||||||
return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
|
return Fail(t, "Parameters must be slice", msgAndArgs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
actualSlice := reflect.ValueOf(actual)
|
actualSlice := reflect.ValueOf(actual)
|
||||||
|
@ -1250,8 +1252,12 @@ func InDeltaMapValues(t TestingT, expected, actual interface{}, delta float64, m
|
||||||
|
|
||||||
func calcRelativeError(expected, actual interface{}) (float64, error) {
|
func calcRelativeError(expected, actual interface{}) (float64, error) {
|
||||||
af, aok := toFloat(expected)
|
af, aok := toFloat(expected)
|
||||||
if !aok {
|
bf, bok := toFloat(actual)
|
||||||
return 0, fmt.Errorf("expected value %q cannot be converted to float", expected)
|
if !aok || !bok {
|
||||||
|
return 0, fmt.Errorf("Parameters must be numerical")
|
||||||
|
}
|
||||||
|
if math.IsNaN(af) && math.IsNaN(bf) {
|
||||||
|
return 0, nil
|
||||||
}
|
}
|
||||||
if math.IsNaN(af) {
|
if math.IsNaN(af) {
|
||||||
return 0, errors.New("expected value must not be NaN")
|
return 0, errors.New("expected value must not be NaN")
|
||||||
|
@ -1259,10 +1265,6 @@ func calcRelativeError(expected, actual interface{}) (float64, error) {
|
||||||
if af == 0 {
|
if af == 0 {
|
||||||
return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
|
return 0, fmt.Errorf("expected value must have a value other than zero to calculate the relative error")
|
||||||
}
|
}
|
||||||
bf, bok := toFloat(actual)
|
|
||||||
if !bok {
|
|
||||||
return 0, fmt.Errorf("actual value %q cannot be converted to float", actual)
|
|
||||||
}
|
|
||||||
if math.IsNaN(bf) {
|
if math.IsNaN(bf) {
|
||||||
return 0, errors.New("actual value must not be NaN")
|
return 0, errors.New("actual value must not be NaN")
|
||||||
}
|
}
|
||||||
|
@ -1298,7 +1300,7 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
|
||||||
if expected == nil || actual == nil ||
|
if expected == nil || actual == nil ||
|
||||||
reflect.TypeOf(actual).Kind() != reflect.Slice ||
|
reflect.TypeOf(actual).Kind() != reflect.Slice ||
|
||||||
reflect.TypeOf(expected).Kind() != reflect.Slice {
|
reflect.TypeOf(expected).Kind() != reflect.Slice {
|
||||||
return Fail(t, fmt.Sprintf("Parameters must be slice"), msgAndArgs...)
|
return Fail(t, "Parameters must be slice", msgAndArgs...)
|
||||||
}
|
}
|
||||||
|
|
||||||
actualSlice := reflect.ValueOf(actual)
|
actualSlice := reflect.ValueOf(actual)
|
||||||
|
@ -1375,6 +1377,27 @@ func EqualError(t TestingT, theError error, errString string, msgAndArgs ...inte
|
||||||
return true
|
return true
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// ErrorContains asserts that a function returned an error (i.e. not `nil`)
|
||||||
|
// and that the error contains the specified substring.
|
||||||
|
//
|
||||||
|
// actualObj, err := SomeFunction()
|
||||||
|
// assert.ErrorContains(t, err, expectedErrorSubString)
|
||||||
|
func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) bool {
|
||||||
|
if h, ok := t.(tHelper); ok {
|
||||||
|
h.Helper()
|
||||||
|
}
|
||||||
|
if !Error(t, theError, msgAndArgs...) {
|
||||||
|
return false
|
||||||
|
}
|
||||||
|
|
||||||
|
actual := theError.Error()
|
||||||
|
if !strings.Contains(actual, contains) {
|
||||||
|
return Fail(t, fmt.Sprintf("Error %#v does not contain %#v", actual, contains), msgAndArgs...)
|
||||||
|
}
|
||||||
|
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
// matchRegexp return true if a specified regexp matches a string.
|
// matchRegexp return true if a specified regexp matches a string.
|
||||||
func matchRegexp(rx interface{}, str interface{}) bool {
|
func matchRegexp(rx interface{}, str interface{}) bool {
|
||||||
|
|
||||||
|
@ -1588,12 +1611,17 @@ func diff(expected interface{}, actual interface{}) string {
|
||||||
}
|
}
|
||||||
|
|
||||||
var e, a string
|
var e, a string
|
||||||
if et != reflect.TypeOf("") {
|
|
||||||
e = spewConfig.Sdump(expected)
|
switch et {
|
||||||
a = spewConfig.Sdump(actual)
|
case reflect.TypeOf(""):
|
||||||
} else {
|
|
||||||
e = reflect.ValueOf(expected).String()
|
e = reflect.ValueOf(expected).String()
|
||||||
a = reflect.ValueOf(actual).String()
|
a = reflect.ValueOf(actual).String()
|
||||||
|
case reflect.TypeOf(time.Time{}):
|
||||||
|
e = spewConfigStringerEnabled.Sdump(expected)
|
||||||
|
a = spewConfigStringerEnabled.Sdump(actual)
|
||||||
|
default:
|
||||||
|
e = spewConfig.Sdump(expected)
|
||||||
|
a = spewConfig.Sdump(actual)
|
||||||
}
|
}
|
||||||
|
|
||||||
diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
|
diff, _ := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{
|
||||||
|
@ -1625,6 +1653,14 @@ var spewConfig = spew.ConfigState{
|
||||||
MaxDepth: 10,
|
MaxDepth: 10,
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var spewConfigStringerEnabled = spew.ConfigState{
|
||||||
|
Indent: " ",
|
||||||
|
DisablePointerAddresses: true,
|
||||||
|
DisableCapacities: true,
|
||||||
|
SortKeys: true,
|
||||||
|
MaxDepth: 10,
|
||||||
|
}
|
||||||
|
|
||||||
type tHelper interface {
|
type tHelper interface {
|
||||||
Helper()
|
Helper()
|
||||||
}
|
}
|
||||||
|
|
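To make the effect of the added nil guard in containsElement concrete, here is a small hedged sketch (not part of this commit; the test name and values are illustrative): with the guard in place, Contains on an untyped nil collection reports a normal assertion failure instead of panicking inside the helper.

```go
package example_test

import (
    "testing"

    "github.com/stretchr/testify/assert"
)

// TestContainsNilList exercises assert.Contains with a nil collection.
// With the listType == nil check above, the helper returns (false, false)
// and Contains fails cleanly rather than calling Kind() on a nil reflect.Type.
func TestContainsNilList(t *testing.T) {
    mockT := new(testing.T) // capture the failure instead of failing this test
    ok := assert.Contains(mockT, nil, "x")
    assert.False(t, ok)
}
```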
@@ -280,6 +280,36 @@ func ErrorAsf(t TestingT, err error, target interface{}, msg string, args ...int
    t.FailNow()
}

+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// assert.ErrorContains(t, err, expectedErrorSubString)
+func ErrorContains(t TestingT, theError error, contains string, msgAndArgs ...interface{}) {
+    if h, ok := t.(tHelper); ok {
+        h.Helper()
+    }
+    if assert.ErrorContains(t, theError, contains, msgAndArgs...) {
+        return
+    }
+    t.FailNow()
+}
+
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// assert.ErrorContainsf(t, err, expectedErrorSubString, "error message %s", "formatted")
+func ErrorContainsf(t TestingT, theError error, contains string, msg string, args ...interface{}) {
+    if h, ok := t.(tHelper); ok {
+        h.Helper()
+    }
+    if assert.ErrorContainsf(t, theError, contains, msg, args...) {
+        return
+    }
+    t.FailNow()
+}
+
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func ErrorIs(t TestingT, err error, target error, msgAndArgs ...interface{}) {

@@ -223,6 +223,30 @@ func (a *Assertions) ErrorAsf(err error, target interface{}, msg string, args ..
    ErrorAsf(a.t, err, target, msg, args...)
}

+// ErrorContains asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// a.ErrorContains(err, expectedErrorSubString)
+func (a *Assertions) ErrorContains(theError error, contains string, msgAndArgs ...interface{}) {
+    if h, ok := a.t.(tHelper); ok {
+        h.Helper()
+    }
+    ErrorContains(a.t, theError, contains, msgAndArgs...)
+}
+
+// ErrorContainsf asserts that a function returned an error (i.e. not `nil`)
+// and that the error contains the specified substring.
+//
+// actualObj, err := SomeFunction()
+// a.ErrorContainsf(err, expectedErrorSubString, "error message %s", "formatted")
+func (a *Assertions) ErrorContainsf(theError error, contains string, msg string, args ...interface{}) {
+    if h, ok := a.t.(tHelper); ok {
+        h.Helper()
+    }
+    ErrorContainsf(a.t, theError, contains, msg, args...)
+}
+
// ErrorIs asserts that at least one of the errors in err's chain matches target.
// This is a wrapper for errors.Is.
func (a *Assertions) ErrorIs(err error, target error, msgAndArgs ...interface{}) {
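A brief usage sketch of the new require helpers (not part of the diff; the function under test and its error text are hypothetical):

```go
package example_test

import (
    "errors"
    "testing"

    "github.com/stretchr/testify/require"
)

// dial stands in for any code path that can fail with a descriptive error.
func dial() error {
    return errors.New("dial origin: connection refused")
}

func TestDialError(t *testing.T) {
    err := dial()

    // Fails and stops the test immediately if err is nil or lacks the substring.
    require.ErrorContains(t, err, "connection refused")
    require.ErrorContainsf(t, err, "dial origin", "unexpected error: %v", err)
}
```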
@@ -0,0 +1,41 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jaeger

import "context"

type jaegerKeyType int

const (
    debugKey jaegerKeyType = iota
)

// withDebug returns a copy of parent with debug set as the debug flag value .
func withDebug(parent context.Context, debug bool) context.Context {
    return context.WithValue(parent, debugKey, debug)
}

// debugFromContext returns the debug value stored in ctx.
//
// If no debug value is stored in ctx false is returned.
func debugFromContext(ctx context.Context) bool {
    if ctx == nil {
        return false
    }
    if debug, ok := ctx.Value(debugKey).(bool); ok {
        return debug
    }
    return false
}
@@ -0,0 +1,17 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// This package implements the Jaeger propagator specification as defined
// at https://www.jaegertracing.io/docs/1.18/client-libraries/#propagation-format
package jaeger // import "go.opentelemetry.io/contrib/propagators/jaeger"
161 vendor/go.opentelemetry.io/contrib/propagators/jaeger/jaeger_propagator.go generated vendored Normal file
@@ -0,0 +1,161 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jaeger

import (
    "context"
    "errors"
    "fmt"
    "strconv"
    "strings"

    "go.opentelemetry.io/otel/propagation"
    "go.opentelemetry.io/otel/trace"
)

const (
    jaegerHeader        = "uber-trace-id"
    separator           = ":"
    traceID64bitsWidth  = 64 / 4
    traceID128bitsWidth = 128 / 4
    spanIDWidth         = 64 / 4

    traceIDPadding = "0000000000000000"

    flagsDebug      = 0x02
    flagsSampled    = 0x01
    flagsNotSampled = 0x00

    deprecatedParentSpanID = "0"
)

var (
    empty = trace.SpanContext{}

    errMalformedTraceContextVal = errors.New("header value of uber-trace-id should contain four different part separated by : ")
    errInvalidTraceIDLength     = errors.New("invalid trace id length, must be either 16 or 32")
    errMalformedTraceID         = errors.New("cannot decode trace id from header, should be a string of hex, lowercase trace id can't be all zero")
    errInvalidSpanIDLength      = errors.New("invalid span id length, must be 16")
    errMalformedSpanID          = errors.New("cannot decode span id from header, should be a string of hex, lowercase span id can't be all zero")
    errMalformedFlag            = errors.New("cannot decode flag")
)

// Jaeger propagator serializes SpanContext to/from Jaeger Headers
//
// Jaeger format:
//
// uber-trace-id: {trace-id}:{span-id}:{parent-span-id}:{flags}
type Jaeger struct{}

var _ propagation.TextMapPropagator = &Jaeger{}

// Inject injects a context to the carrier following jaeger format.
// The parent span ID is set to an dummy parent span id as the most implementations do.
func (jaeger Jaeger) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
    sc := trace.SpanFromContext(ctx).SpanContext()
    headers := []string{}
    if !sc.TraceID().IsValid() || !sc.SpanID().IsValid() {
        return
    }
    headers = append(headers, sc.TraceID().String(), sc.SpanID().String(), deprecatedParentSpanID)
    if debugFromContext(ctx) {
        headers = append(headers, fmt.Sprintf("%x", flagsDebug|flagsSampled))
    } else if sc.IsSampled() {
        headers = append(headers, fmt.Sprintf("%x", flagsSampled))
    } else {
        headers = append(headers, fmt.Sprintf("%x", flagsNotSampled))
    }

    carrier.Set(jaegerHeader, strings.Join(headers, separator))
}

// Extract extracts a context from the carrier if it contains Jaeger headers.
func (jaeger Jaeger) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context {
    // extract tracing information
    if h := carrier.Get(jaegerHeader); h != "" {
        ctx, sc, err := extract(ctx, h)
        if err == nil && sc.IsValid() {
            return trace.ContextWithRemoteSpanContext(ctx, sc)
        }
    }

    return ctx
}

func extract(ctx context.Context, headerVal string) (context.Context, trace.SpanContext, error) {
    var (
        scc = trace.SpanContextConfig{}
        err error
    )

    parts := strings.Split(headerVal, separator)
    if len(parts) != 4 {
        return ctx, empty, errMalformedTraceContextVal
    }

    // extract trace ID
    if parts[0] != "" {
        id := parts[0]
        if len(id) != traceID128bitsWidth && len(id) != traceID64bitsWidth {
            return ctx, empty, errInvalidTraceIDLength
        }
        // padding when length is 16
        if len(id) == traceID64bitsWidth {
            id = traceIDPadding + id
        }
        scc.TraceID, err = trace.TraceIDFromHex(id)
        if err != nil {
            return ctx, empty, errMalformedTraceID
        }
    }

    // extract span ID
    if parts[1] != "" {
        id := parts[1]
        if len(id) != spanIDWidth {
            return ctx, empty, errInvalidSpanIDLength
        }
        scc.SpanID, err = trace.SpanIDFromHex(id)
        if err != nil {
            return ctx, empty, errMalformedSpanID
        }
    }

    // skip third part as it is deprecated

    // extract flag
    if parts[3] != "" {
        flagStr := parts[3]
        flag, err := strconv.ParseInt(flagStr, 16, 64)
        if err != nil {
            return ctx, empty, errMalformedFlag
        }
        if flag&flagsSampled == flagsSampled {
            // if sample bit is set, we check if debug bit is also set
            if flag&flagsDebug == flagsDebug {
                scc.TraceFlags |= trace.FlagsSampled
                ctx = withDebug(ctx, true)
            } else {
                scc.TraceFlags |= trace.FlagsSampled
            }
        }
        // ignore other bit, including firehose since we don't have corresponding flag in trace context.
    }
    return ctx, trace.NewSpanContext(scc), nil
}

func (jaeger Jaeger) Fields() []string {
    return []string{jaegerHeader}
}
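For orientation only (not part of the vendored file), a minimal sketch of how a propagator like this is typically registered and used to inject the uber-trace-id header; the composite setup and URL are illustrative assumptions, and with no active span the injected value will simply be empty:

```go
package main

import (
    "context"
    "fmt"
    "net/http"

    jaegerprop "go.opentelemetry.io/contrib/propagators/jaeger"
    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/propagation"
)

func main() {
    // Register Jaeger propagation alongside the W3C trace context default.
    otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
        propagation.TraceContext{},
        jaegerprop.Jaeger{},
    ))

    // Inject whatever span context is carried by ctx into the outgoing headers.
    ctx := context.Background()
    req, _ := http.NewRequestWithContext(ctx, http.MethodGet, "https://origin.example.com", nil)
    otel.GetTextMapPropagator().Inject(ctx, propagation.HeaderCarrier(req.Header))
    fmt.Println(req.Header.Get("uber-trace-id"))
}
```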
@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
|
@@ -0,0 +1,3 @@
* text=auto eol=lf
*.{cmd,[cC][mM][dD]} text eol=crlf
*.{bat,[bB][aA][tT]} text eol=crlf
@@ -0,0 +1,20 @@
.DS_Store
Thumbs.db

.tools/
.idea/
.vscode/
*.iml
*.so
coverage.*

gen/

/example/fib/fib
/example/jaeger/jaeger
/example/namedtracer/namedtracer
/example/opencensus/opencensus
/example/passthrough/passthrough
/example/prometheus/prometheus
/example/zipkin/zipkin
/example/otel-collector/otel-collector
@@ -0,0 +1,3 @@
[submodule "opentelemetry-proto"]
    path = exporters/otlp/internal/opentelemetry-proto
    url = https://github.com/open-telemetry/opentelemetry-proto
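As an aside (not part of the diff), a submodule declared this way is normally brought in after cloning with the standard `git submodule update --init` (add `--recursive` for nested modules).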
@@ -0,0 +1,47 @@
# See https://github.com/golangci/golangci-lint#config-file
run:
  issues-exit-code: 1 #Default
  tests: true #Default

linters:
  # Disable everything by default so upgrades to not include new "default
  # enabled" linters.
  disable-all: true
  # Specifically enable linters we want to use.
  enable:
    - deadcode
    - errcheck
    - gofmt
    - goimports
    - gosimple
    - govet
    - ineffassign
    - misspell
    - revive
    - staticcheck
    - structcheck
    - typecheck
    - unused
    - varcheck


issues:
  exclude-rules:
    # helpers in tests often (rightfully) pass a *testing.T as their first argument
    - path: _test\.go
      text: "context.Context should be the first parameter of a function"
      linters:
        - revive
    # Yes, they are, but it's okay in a test
    - path: _test\.go
      text: "exported func.*returns unexported type.*which can be annoying to use"
      linters:
        - revive

linters-settings:
  misspell:
    locale: US
    ignore-words:
      - cancelled
  goimports:
    local-prefixes: go.opentelemetry.io
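As an aside (not part of the diff), assuming this file is saved under one of the default config names (for example `.golangci.yml`), it is the configuration that `golangci-lint run` picks up automatically when executed at the root of the module.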
@@ -0,0 +1,20 @@
{
  "ignorePatterns": [
    {
      "pattern": "^http(s)?://localhost"
    }
  ],
  "replacementPatterns": [
    {
      "pattern": "^/registry",
      "replacement": "https://opentelemetry.io/registry"
    },
    {
      "pattern": "^/docs/",
      "replacement": "https://opentelemetry.io/docs/"
    }
  ],
  "retryOn429": true,
  "retryCount": 5,
  "fallbackRetryDelay": "30s"
}
@@ -0,0 +1,29 @@
# Default state for all rules
default: true

# ul-style
MD004: false

# hard-tabs
MD010: false

# line-length
MD013: false

# no-duplicate-header
MD024:
  siblings_only: true

#single-title
MD025: false

# ol-prefix
MD029:
  style: ordered

# no-inline-html
MD033: false

# fenced-code-language
MD040: false
|
|
File diff suppressed because it is too large.
@@ -0,0 +1,17 @@
#####################################################
#
# List of approvers for this repository
#
#####################################################
#
# Learn about membership in OpenTelemetry community:
#  https://github.com/open-telemetry/community/blob/main/community-membership.md
#
#
# Learn about CODEOWNERS file format:
#  https://help.github.com/en/articles/about-code-owners
#

* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @paivagustavo @MadVikingGod @pellared @hanyuancheung

CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod
@ -0,0 +1,522 @@
|
||||||
|
# Contributing to opentelemetry-go
|
||||||
|
|
||||||
|
The Go special interest group (SIG) meets regularly. See the
|
||||||
|
OpenTelemetry
|
||||||
|
[community](https://github.com/open-telemetry/community#golang-sdk)
|
||||||
|
repo for information on this and other language SIGs.
|
||||||
|
|
||||||
|
See the [public meeting
|
||||||
|
notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b)
|
||||||
|
for a summary description of past meetings. To request edit access,
|
||||||
|
join the meeting or get in touch on
|
||||||
|
[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT).
|
||||||
|
|
||||||
|
## Development
|
||||||
|
|
||||||
|
You can view and edit the source code by cloning this repository:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
git clone https://github.com/open-telemetry/opentelemetry-go.git
|
||||||
|
```
|
||||||
|
|
||||||
|
Run `make test` to run the tests instead of `go test`.
|
||||||
|
|
||||||
|
There are some generated files checked into the repo. To make sure
|
||||||
|
that the generated files are up-to-date, run `make` (or `make
|
||||||
|
precommit` - the `precommit` target is the default).
|
||||||
|
|
||||||
|
The `precommit` target also fixes the formatting of the code and
|
||||||
|
checks the status of the go module files.
|
||||||
|
|
||||||
|
If after running `make precommit` the output of `git status` contains
|
||||||
|
`nothing to commit, working tree clean` then it means that everything
|
||||||
|
is up-to-date and properly formatted.
|
||||||
|
|
||||||
|
## Pull Requests
|
||||||
|
|
||||||
|
### How to Send Pull Requests
|
||||||
|
|
||||||
|
Everyone is welcome to contribute code to `opentelemetry-go` via
|
||||||
|
GitHub pull requests (PRs).
|
||||||
|
|
||||||
|
To create a new PR, fork the project in GitHub and clone the upstream
|
||||||
|
repo:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
go get -d go.opentelemetry.io/otel
|
||||||
|
```
|
||||||
|
|
||||||
|
(This may print some warning about "build constraints exclude all Go
|
||||||
|
files", just ignore it.)
|
||||||
|
|
||||||
|
This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You
|
||||||
|
can alternatively use `git` directly with:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
git clone https://github.com/open-telemetry/opentelemetry-go
|
||||||
|
```
|
||||||
|
|
||||||
|
(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name -
|
||||||
|
that name is a kind of a redirector to GitHub that `go get` can
|
||||||
|
understand, but `git` does not.)
|
||||||
|
|
||||||
|
This would put the project in the `opentelemetry-go` directory in
|
||||||
|
current working directory.
|
||||||
|
|
||||||
|
Enter the newly created directory and add your fork as a new remote:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go
|
||||||
|
```
|
||||||
|
|
||||||
|
Check out a new branch, make modifications, run linters and tests, update
|
||||||
|
`CHANGELOG.md`, and push the branch to your fork:
|
||||||
|
|
||||||
|
```sh
|
||||||
|
git checkout -b <YOUR_BRANCH_NAME>
|
||||||
|
# edit files
|
||||||
|
# update changelog
|
||||||
|
make precommit
|
||||||
|
git add -p
|
||||||
|
git commit
|
||||||
|
git push <YOUR_FORK> <YOUR_BRANCH_NAME>
|
||||||
|
```
|
||||||
|
|
||||||
|
Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull
|
||||||
|
request ID to the entry you added to `CHANGELOG.md`.
|
||||||
|
|
||||||
|
### How to Receive Comments
|
||||||
|
|
||||||
|
* If the PR is not ready for review, please put `[WIP]` in the title,
|
||||||
|
tag it as `work-in-progress`, or mark it as
|
||||||
|
[`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/).
|
||||||
|
* Make sure CLA is signed and CI is clear.
|
||||||
|
|
||||||
|
### How to Get PRs Merged
|
||||||
|
|
||||||
|
A PR is considered to be **ready to merge** when:
|
||||||
|
|
||||||
|
* It has received two approvals from Collaborators/Maintainers (at
|
||||||
|
different companies). This is not enforced through technical means
|
||||||
|
and a PR may be **ready to merge** with a single approval if the change
|
||||||
|
and its approach have been discussed and consensus reached.
|
||||||
|
* Feedback has been addressed.
|
||||||
|
* Any substantive changes to your PR will require that you clear any prior
|
||||||
|
Approval reviews; this includes changes resulting from other feedback. Unless
|
||||||
|
the approver explicitly stated that their approval will persist across
|
||||||
|
changes it should be assumed that the PR needs their review again. Other
|
||||||
|
project members (e.g. approvers, maintainers) can help with this if there are
|
||||||
|
any questions or if you forget to clear reviews.
|
||||||
|
* It has been open for review for at least one working day. This gives
|
||||||
|
people reasonable time to review.
|
||||||
|
* Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for
|
||||||
|
one day and may be merged with a single Maintainer's approval.
|
||||||
|
* `CHANGELOG.md` has been updated to reflect what has been
|
||||||
|
added, changed, removed, or fixed.
|
||||||
|
* `README.md` has been updated if necessary.
|
||||||
|
* Urgent fix can take exception as long as it has been actively
|
||||||
|
communicated.
|
||||||
|
|
||||||
|
Any Maintainer can merge the PR once it is **ready to merge**.
|
||||||
|
|
||||||
|
## Design Choices
|
||||||
|
|
||||||
|
As with other OpenTelemetry clients, opentelemetry-go follows the
|
||||||
|
[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification).
|
||||||
|
|
||||||
|
It's especially valuable to read through the [library
|
||||||
|
guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/library-guidelines.md).
|
||||||
|
|
||||||
|
### Focus on Capabilities, Not Structure Compliance
|
||||||
|
|
||||||
|
OpenTelemetry is an evolving specification, one where the desires and
|
||||||
|
use cases are clear, but the methods to satisfy those use cases are
|
||||||
|
not.
|
||||||
|
|
||||||
|
As such, Contributions should provide functionality and behavior that
|
||||||
|
conforms to the specification, but the interface and structure are
|
||||||
|
flexible.
|
||||||
|
|
||||||
|
It is preferable to have contributions follow the idioms of the
|
||||||
|
language rather than conform to specific API names or argument
|
||||||
|
patterns in the spec.
|
||||||
|
|
||||||
|
For a deeper discussion, see
|
||||||
|
[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165).
|
||||||
|
|
||||||
|
## Documentation
|
||||||
|
|
||||||
|
Each non-example Go Module should have its own `README.md` containing:
|
||||||
|
|
||||||
|
- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/).
|
||||||
|
- Brief description.
|
||||||
|
- Installation instructions (and requirements if applicable).
|
||||||
|
- Hyperlink to an example. Depending on the component the example can be:
|
||||||
|
- An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go).
|
||||||
|
- A sample Go application with its own `README.md`, like [here](example/zipkin).
|
||||||
|
- Additional documentation sections such as:
|
||||||
|
- Configuration,
|
||||||
|
- Contributing,
|
||||||
|
- References.
|
||||||
|
|
||||||
|
[Here](exporters/jaeger/README.md) is an example of a concise `README.md`.
|
||||||
|
|
||||||
|
Moreover, it should be possible to navigate to any `README.md` from the
|
||||||
|
root `README.md`.
|
||||||
|
|
||||||
|
## Style Guide
|
||||||
|
|
||||||
|
One of the primary goals of this project is that it is actually used by
|
||||||
|
developers. With this goal in mind the project strives to build
|
||||||
|
user-friendly and idiomatic Go code adhering to the Go community's best
|
||||||
|
practices.
|
||||||
|
|
||||||
|
For a non-comprehensive but foundational overview of these best practices
|
||||||
|
the [Effective Go](https://golang.org/doc/effective_go.html) documentation
|
||||||
|
is an excellent starting place.
|
||||||
|
|
||||||
|
As a convenience for developers building this project the `make precommit`
|
||||||
|
will format, lint, validate, and in some cases fix the changes you plan to
|
||||||
|
submit. This check will need to pass for your changes to be able to be
|
||||||
|
merged.
|
||||||
|
|
||||||
|
In addition to idiomatic Go, the project has adopted certain standards for
|
||||||
|
implementations of common patterns. These standards should be followed as a
|
||||||
|
default, and if they are not followed documentation needs to be included as
|
||||||
|
to the reasons why.
|
||||||
|
|
||||||
|
### Configuration
|
||||||
|
|
||||||
|
When creating an instantiation function for a complex `type T struct`, it is
|
||||||
|
useful to allow variable number of options to be applied. However, the strong
|
||||||
|
type system of Go restricts the function design options. There are a few ways
|
||||||
|
to solve this problem, but we have landed on the following design.
|
||||||
|
|
||||||
|
#### `config`
|
||||||
|
|
||||||
|
Configuration should be held in a `struct` named `config`, or prefixed with
|
||||||
|
specific type name this Configuration applies to if there are multiple
|
||||||
|
`config` in the package. This type must contain configuration options.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// config contains configuration options for a thing.
|
||||||
|
type config struct {
|
||||||
|
// options ...
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
In general the `config` type will not need to be used externally to the
|
||||||
|
package and should be unexported. If, however, it is expected that the user
|
||||||
|
will likely want to build custom options for the configuration, the `config`
|
||||||
|
should be exported. Please, include in the documentation for the `config`
|
||||||
|
how the user can extend the configuration.
|
||||||
|
|
||||||
|
It is important that internal `config` are not shared across package boundaries.
|
||||||
|
Meaning a `config` from one package should not be directly used by another. The
|
||||||
|
one exception is the API packages. The configs from the base API, eg.
|
||||||
|
`go.opentelemetry.io/otel/trace.TracerConfig` and
|
||||||
|
`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed
|
||||||
|
by the SDK, therefore it is expected that these are exported.
|
||||||
|
|
||||||
|
When a config is exported we want to maintain forward and backward
|
||||||
|
compatibility; to achieve this, no fields should be exported but should
|
||||||
|
instead be accessed by methods.
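A small illustrative sketch of that convention, modeled loosely on the `TracerConfig` mentioned above (names here are for illustration only): the field stays unexported and is exposed through a read-only accessor.

```go
package config

// TracerConfig is an example of an exported config: its data is unexported
// so the struct can evolve without breaking callers.
type TracerConfig struct {
    instrumentationVersion string
}

// InstrumentationVersion returns the version the tracer was configured with.
func (c TracerConfig) InstrumentationVersion() string {
    return c.instrumentationVersion
}
```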
|
||||||
|
|
||||||
|
Optionally, it is common to include a `newConfig` function (with the same
|
||||||
|
naming scheme). This function wraps any defaults setting and looping over
|
||||||
|
all options to create a configured `config`.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// newConfig returns an appropriately configured config.
|
||||||
|
func newConfig(options ...Option) config {
|
||||||
|
// Set default values for config.
|
||||||
|
config := config{/* […] */}
|
||||||
|
for _, option := range options {
|
||||||
|
config = option.apply(config)
|
||||||
|
}
|
||||||
|
// Perform any validation here.
|
||||||
|
return config
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
If validation of the `config` options is also performed, this can return an
|
||||||
|
error as well that is expected to be handled by the instantiation function
|
||||||
|
or propagated to the user.
|
||||||
|
|
||||||
|
Given the design goal of not having the user need to work with the `config`,
|
||||||
|
the `newConfig` function should also be unexported.
|
||||||
|
|
||||||
|
#### `Option`
|
||||||
|
|
||||||
|
To set the value of the options a `config` contains, a corresponding
|
||||||
|
`Option` interface type should be used.
|
||||||
|
|
||||||
|
```go
|
||||||
|
type Option interface {
|
||||||
|
apply(config) config
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Having `apply` unexported makes sure that it will not be used externally.
|
||||||
|
Moreover, the interface becomes sealed so the user cannot easily implement
|
||||||
|
the interface on its own.
|
||||||
|
|
||||||
|
The `apply` method should return a modified version of the passed config.
|
||||||
|
This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap.
|
||||||
|
|
||||||
|
The name of the interface should be prefixed in the same way the
|
||||||
|
corresponding `config` is (if at all).
|
||||||
|
|
||||||
|
#### Options
|
||||||
|
|
||||||
|
All user configurable options for a `config` must have a related unexported
|
||||||
|
implementation of the `Option` interface and an exported configuration
|
||||||
|
function that wraps this implementation.
|
||||||
|
|
||||||
|
The wrapping function name should be prefixed with `With*` (or in the
|
||||||
|
special case of a boolean options `Without*`) and should have the following
|
||||||
|
function signature.
|
||||||
|
|
||||||
|
```go
|
||||||
|
func With*(…) Option { … }
|
||||||
|
```
|
||||||
|
|
||||||
|
##### `bool` Options
|
||||||
|
|
||||||
|
```go
|
||||||
|
type defaultFalseOption bool
|
||||||
|
|
||||||
|
func (o defaultFalseOption) apply(c config) config {
|
||||||
|
c.Bool = bool(o)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithOption sets a T to have an option included.
|
||||||
|
func WithOption() Option {
|
||||||
|
return defaultFalseOption(true)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
```go
|
||||||
|
type defaultTrueOption bool
|
||||||
|
|
||||||
|
func (o defaultTrueOption) apply(c config) config {
|
||||||
|
c.Bool = bool(o)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithoutOption sets a T to have Bool option excluded.
|
||||||
|
func WithoutOption() Option {
|
||||||
|
return defaultTrueOption(false)
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Declared Type Options
|
||||||
|
|
||||||
|
```go
|
||||||
|
type myTypeOption struct {
|
||||||
|
MyType MyType
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o myTypeOption) apply(c config) config {
|
||||||
|
c.MyType = o.MyType
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithMyType sets T to have include MyType.
|
||||||
|
func WithMyType(t MyType) Option {
|
||||||
|
return myTypeOption{t}
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
##### Functional Options
|
||||||
|
|
||||||
|
```go
|
||||||
|
type optionFunc func(config) config
|
||||||
|
|
||||||
|
func (fn optionFunc) apply(c config) config {
|
||||||
|
return fn(c)
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithMyType sets t as MyType.
|
||||||
|
func WithMyType(t MyType) Option {
|
||||||
|
return optionFunc(func(c config) config {
|
||||||
|
c.MyType = t
|
||||||
|
return c
|
||||||
|
})
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
#### Instantiation
|
||||||
|
|
||||||
|
Using this configuration pattern to configure instantiation with a `NewT`
|
||||||
|
function.
|
||||||
|
|
||||||
|
```go
|
||||||
|
func NewT(options ...Option) T {…}
|
||||||
|
```
|
||||||
|
|
||||||
|
Any required parameters can be declared before the variadic `options`.
|
||||||
|
|
||||||
|
#### Dealing with Overlap
|
||||||
|
|
||||||
|
Sometimes there are multiple complex `struct` that share common
|
||||||
|
configuration and also have distinct configuration. To avoid repeated
|
||||||
|
portions of `config`s, a common `config` can be used with the union of
|
||||||
|
options being handled with the `Option` interface.
|
||||||
|
|
||||||
|
For example.
|
||||||
|
|
||||||
|
```go
|
||||||
|
// config holds options for all animals.
|
||||||
|
type config struct {
|
||||||
|
Weight float64
|
||||||
|
Color string
|
||||||
|
MaxAltitude float64
|
||||||
|
}
|
||||||
|
|
||||||
|
// DogOption apply Dog specific options.
|
||||||
|
type DogOption interface {
|
||||||
|
applyDog(config) config
|
||||||
|
}
|
||||||
|
|
||||||
|
// BirdOption apply Bird specific options.
|
||||||
|
type BirdOption interface {
|
||||||
|
applyBird(config) config
|
||||||
|
}
|
||||||
|
|
||||||
|
// Option apply options for all animals.
|
||||||
|
type Option interface {
|
||||||
|
BirdOption
|
||||||
|
DogOption
|
||||||
|
}
|
||||||
|
|
||||||
|
type weightOption float64
|
||||||
|
|
||||||
|
func (o weightOption) applyDog(c config) config {
|
||||||
|
c.Weight = float64(o)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
func (o weightOption) applyBird(c config) config {
|
||||||
|
c.Weight = float64(o)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithWeight(w float64) Option { return weightOption(w) }
|
||||||
|
|
||||||
|
type furColorOption string
|
||||||
|
|
||||||
|
func (o furColorOption) applyDog(c config) config {
|
||||||
|
c.Color = string(o)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithFurColor(c string) DogOption { return furColorOption(c) }
|
||||||
|
|
||||||
|
type maxAltitudeOption float64
|
||||||
|
|
||||||
|
func (o maxAltitudeOption) applyBird(c config) config {
|
||||||
|
c.MaxAltitude = float64(o)
|
||||||
|
return c
|
||||||
|
}
|
||||||
|
|
||||||
|
func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) }
|
||||||
|
|
||||||
|
func NewDog(name string, o ...DogOption) Dog {…}
|
||||||
|
func NewBird(name string, o ...BirdOption) Bird {…}
|
||||||
|
```
|
||||||
|
|
||||||
|
### Interfaces
|
||||||
|
|
||||||
|
To allow other developers to better comprehend the code, it is important
|
||||||
|
to ensure it is sufficiently documented. One simple measure that contributes
|
||||||
|
to this aim is self-documenting by naming method parameters. Therefore,
|
||||||
|
where appropriate, methods of every exported interface type should have
|
||||||
|
their parameters appropriately named.
|
||||||
|
|
||||||
|
#### Interface Stability
|
||||||
|
|
||||||
|
All exported stable interfaces that include the following warning in their
|
||||||
|
documentation are allowed to be extended with additional methods.
|
||||||
|
|
||||||
|
> Warning: methods may be added to this interface in minor releases.
|
||||||
|
|
||||||
|
Otherwise, stable interfaces MUST NOT be modified.
|
||||||
|
|
||||||
|
If new functionality is needed for an interface that cannot be changed, it MUST
|
||||||
|
be added by including an additional interface. That added interface can be a
|
||||||
|
simple interface for the specific functionality that you want to add or it can
|
||||||
|
be a super-set of the original interface. For example, if you wanted to add a
|
||||||
|
`Close` method to the `Exporter` interface:
|
||||||
|
|
||||||
|
```go
|
||||||
|
type Exporter interface {
|
||||||
|
Export()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
A new interface, `Closer`, can be added:
|
||||||
|
|
||||||
|
```go
|
||||||
|
type Closer interface {
|
||||||
|
Close()
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Code that is passed the `Exporter` interface can now check to see if the passed
|
||||||
|
value also satisfies the new interface. E.g.
|
||||||
|
|
||||||
|
```go
|
||||||
|
func caller(e Exporter) {
|
||||||
|
/* ... */
|
||||||
|
if c, ok := e.(Closer); ok {
|
||||||
|
c.Close()
|
||||||
|
}
|
||||||
|
/* ... */
|
||||||
|
}
|
||||||
|
```
|
||||||
|
|
||||||
|
Alternatively, a new type that is the super-set of an `Exporter` can be created.
|
||||||
|
|
||||||
|
```go
|
||||||
|
type ClosingExporter struct {
	Exporter
}

func (e ClosingExporter) Close() { /* ... */ }
|
||||||
|
```
|
||||||
|
|
||||||
|
This new type can be used similarly to the simple interface above in that a
passed `Exporter` value can be type-asserted to the `ClosingExporter` type
and the `Close` method called.
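
For example, a sketch of that assertion, mirroring the earlier `caller` function; it assumes the dynamic type of `e` is a `ClosingExporter` value rather than a pointer to one:

```go
func caller(e Exporter) {
	/* ... */
	if ce, ok := e.(ClosingExporter); ok {
		ce.Close()
	}
	/* ... */
}
```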
|
||||||
|
|
||||||
|
This super-set approach can be useful if there is explicit behavior that needs
|
||||||
|
to be coupled with the original type and passed as a unified type to a new
|
||||||
|
function, but, because of this coupling, it also limits the applicability of
|
||||||
|
the added functionality. If there exist other interfaces where this
|
||||||
|
functionality should be added, each one will need its own super-set
interface and will duplicate the pattern. For this reason, the simple targeted
|
||||||
|
interface that defines the specific functionality should be preferred.
|
||||||
|
|
||||||
|
## Approvers and Maintainers
|
||||||
|
|
||||||
|
Approvers:
|
||||||
|
|
||||||
|
- [Evan Torrie](https://github.com/evantorrie), Verizon Media
|
||||||
|
- [Josh MacDonald](https://github.com/jmacd), LightStep
|
||||||
|
- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
|
||||||
|
- [David Ashpole](https://github.com/dashpole), Google
|
||||||
|
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
|
||||||
|
- [Robert Pająk](https://github.com/pellared), Splunk
|
||||||
|
- [Chester Cheung](https://github.com/hanyuancheung), Tencent
|
||||||
|
|
||||||
|
Maintainers:
|
||||||
|
|
||||||
|
- [Aaron Clawson](https://github.com/MadVikingGod), LightStep
|
||||||
|
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS
|
||||||
|
- [Tyler Yahn](https://github.com/MrAlias), Splunk
|
||||||
|
|
||||||
|
### Become an Approver or a Maintainer
|
||||||
|
|
||||||
|
See the [community membership document in OpenTelemetry community
|
||||||
|
repo](https://github.com/open-telemetry/community/blob/main/community-membership.md).
|
|
@ -0,0 +1,201 @@
|
||||||
|
Apache License
|
||||||
|
Version 2.0, January 2004
|
||||||
|
http://www.apache.org/licenses/
|
||||||
|
|
||||||
|
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||||
|
|
||||||
|
1. Definitions.
|
||||||
|
|
||||||
|
"License" shall mean the terms and conditions for use, reproduction,
|
||||||
|
and distribution as defined by Sections 1 through 9 of this document.
|
||||||
|
|
||||||
|
"Licensor" shall mean the copyright owner or entity authorized by
|
||||||
|
the copyright owner that is granting the License.
|
||||||
|
|
||||||
|
"Legal Entity" shall mean the union of the acting entity and all
|
||||||
|
other entities that control, are controlled by, or are under common
|
||||||
|
control with that entity. For the purposes of this definition,
|
||||||
|
"control" means (i) the power, direct or indirect, to cause the
|
||||||
|
direction or management of such entity, whether by contract or
|
||||||
|
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||||
|
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||||
|
|
||||||
|
"You" (or "Your") shall mean an individual or Legal Entity
|
||||||
|
exercising permissions granted by this License.
|
||||||
|
|
||||||
|
"Source" form shall mean the preferred form for making modifications,
|
||||||
|
including but not limited to software source code, documentation
|
||||||
|
source, and configuration files.
|
||||||
|
|
||||||
|
"Object" form shall mean any form resulting from mechanical
|
||||||
|
transformation or translation of a Source form, including but
|
||||||
|
not limited to compiled object code, generated documentation,
|
||||||
|
and conversions to other media types.
|
||||||
|
|
||||||
|
"Work" shall mean the work of authorship, whether in Source or
|
||||||
|
Object form, made available under the License, as indicated by a
|
||||||
|
copyright notice that is included in or attached to the work
|
||||||
|
(an example is provided in the Appendix below).
|
||||||
|
|
||||||
|
"Derivative Works" shall mean any work, whether in Source or Object
|
||||||
|
form, that is based on (or derived from) the Work and for which the
|
||||||
|
editorial revisions, annotations, elaborations, or other modifications
|
||||||
|
represent, as a whole, an original work of authorship. For the purposes
|
||||||
|
of this License, Derivative Works shall not include works that remain
|
||||||
|
separable from, or merely link (or bind by name) to the interfaces of,
|
||||||
|
the Work and Derivative Works thereof.
|
||||||
|
|
||||||
|
"Contribution" shall mean any work of authorship, including
|
||||||
|
the original version of the Work and any modifications or additions
|
||||||
|
to that Work or Derivative Works thereof, that is intentionally
|
||||||
|
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||||
|
or by an individual or Legal Entity authorized to submit on behalf of
|
||||||
|
the copyright owner. For the purposes of this definition, "submitted"
|
||||||
|
means any form of electronic, verbal, or written communication sent
|
||||||
|
to the Licensor or its representatives, including but not limited to
|
||||||
|
communication on electronic mailing lists, source code control systems,
|
||||||
|
and issue tracking systems that are managed by, or on behalf of, the
|
||||||
|
Licensor for the purpose of discussing and improving the Work, but
|
||||||
|
excluding communication that is conspicuously marked or otherwise
|
||||||
|
designated in writing by the copyright owner as "Not a Contribution."
|
||||||
|
|
||||||
|
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||||
|
on behalf of whom a Contribution has been received by Licensor and
|
||||||
|
subsequently incorporated within the Work.
|
||||||
|
|
||||||
|
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
copyright license to reproduce, prepare Derivative Works of,
|
||||||
|
publicly display, publicly perform, sublicense, and distribute the
|
||||||
|
Work and such Derivative Works in Source or Object form.
|
||||||
|
|
||||||
|
3. Grant of Patent License. Subject to the terms and conditions of
|
||||||
|
this License, each Contributor hereby grants to You a perpetual,
|
||||||
|
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||||
|
(except as stated in this section) patent license to make, have made,
|
||||||
|
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||||
|
where such license applies only to those patent claims licensable
|
||||||
|
by such Contributor that are necessarily infringed by their
|
||||||
|
Contribution(s) alone or by combination of their Contribution(s)
|
||||||
|
with the Work to which such Contribution(s) was submitted. If You
|
||||||
|
institute patent litigation against any entity (including a
|
||||||
|
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||||
|
or a Contribution incorporated within the Work constitutes direct
|
||||||
|
or contributory patent infringement, then any patent licenses
|
||||||
|
granted to You under this License for that Work shall terminate
|
||||||
|
as of the date such litigation is filed.
|
||||||
|
|
||||||
|
4. Redistribution. You may reproduce and distribute copies of the
|
||||||
|
Work or Derivative Works thereof in any medium, with or without
|
||||||
|
modifications, and in Source or Object form, provided that You
|
||||||
|
meet the following conditions:
|
||||||
|
|
||||||
|
(a) You must give any other recipients of the Work or
|
||||||
|
Derivative Works a copy of this License; and
|
||||||
|
|
||||||
|
(b) You must cause any modified files to carry prominent notices
|
||||||
|
stating that You changed the files; and
|
||||||
|
|
||||||
|
(c) You must retain, in the Source form of any Derivative Works
|
||||||
|
that You distribute, all copyright, patent, trademark, and
|
||||||
|
attribution notices from the Source form of the Work,
|
||||||
|
excluding those notices that do not pertain to any part of
|
||||||
|
the Derivative Works; and
|
||||||
|
|
||||||
|
(d) If the Work includes a "NOTICE" text file as part of its
|
||||||
|
distribution, then any Derivative Works that You distribute must
|
||||||
|
include a readable copy of the attribution notices contained
|
||||||
|
within such NOTICE file, excluding those notices that do not
|
||||||
|
pertain to any part of the Derivative Works, in at least one
|
||||||
|
of the following places: within a NOTICE text file distributed
|
||||||
|
as part of the Derivative Works; within the Source form or
|
||||||
|
documentation, if provided along with the Derivative Works; or,
|
||||||
|
within a display generated by the Derivative Works, if and
|
||||||
|
wherever such third-party notices normally appear. The contents
|
||||||
|
of the NOTICE file are for informational purposes only and
|
||||||
|
do not modify the License. You may add Your own attribution
|
||||||
|
notices within Derivative Works that You distribute, alongside
|
||||||
|
or as an addendum to the NOTICE text from the Work, provided
|
||||||
|
that such additional attribution notices cannot be construed
|
||||||
|
as modifying the License.
|
||||||
|
|
||||||
|
You may add Your own copyright statement to Your modifications and
|
||||||
|
may provide additional or different license terms and conditions
|
||||||
|
for use, reproduction, or distribution of Your modifications, or
|
||||||
|
for any such Derivative Works as a whole, provided Your use,
|
||||||
|
reproduction, and distribution of the Work otherwise complies with
|
||||||
|
the conditions stated in this License.
|
||||||
|
|
||||||
|
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||||
|
any Contribution intentionally submitted for inclusion in the Work
|
||||||
|
by You to the Licensor shall be under the terms and conditions of
|
||||||
|
this License, without any additional terms or conditions.
|
||||||
|
Notwithstanding the above, nothing herein shall supersede or modify
|
||||||
|
the terms of any separate license agreement you may have executed
|
||||||
|
with Licensor regarding such Contributions.
|
||||||
|
|
||||||
|
6. Trademarks. This License does not grant permission to use the trade
|
||||||
|
names, trademarks, service marks, or product names of the Licensor,
|
||||||
|
except as required for reasonable and customary use in describing the
|
||||||
|
origin of the Work and reproducing the content of the NOTICE file.
|
||||||
|
|
||||||
|
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||||
|
agreed to in writing, Licensor provides the Work (and each
|
||||||
|
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||||
|
implied, including, without limitation, any warranties or conditions
|
||||||
|
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||||
|
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||||
|
appropriateness of using or redistributing the Work and assume any
|
||||||
|
risks associated with Your exercise of permissions under this License.
|
||||||
|
|
||||||
|
8. Limitation of Liability. In no event and under no legal theory,
|
||||||
|
whether in tort (including negligence), contract, or otherwise,
|
||||||
|
unless required by applicable law (such as deliberate and grossly
|
||||||
|
negligent acts) or agreed to in writing, shall any Contributor be
|
||||||
|
liable to You for damages, including any direct, indirect, special,
|
||||||
|
incidental, or consequential damages of any character arising as a
|
||||||
|
result of this License or out of the use or inability to use the
|
||||||
|
Work (including but not limited to damages for loss of goodwill,
|
||||||
|
work stoppage, computer failure or malfunction, or any and all
|
||||||
|
other commercial damages or losses), even if such Contributor
|
||||||
|
has been advised of the possibility of such damages.
|
||||||
|
|
||||||
|
9. Accepting Warranty or Additional Liability. While redistributing
|
||||||
|
the Work or Derivative Works thereof, You may choose to offer,
|
||||||
|
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||||
|
or other liability obligations and/or rights consistent with this
|
||||||
|
License. However, in accepting such obligations, You may act only
|
||||||
|
on Your own behalf and on Your sole responsibility, not on behalf
|
||||||
|
of any other Contributor, and only if You agree to indemnify,
|
||||||
|
defend, and hold each Contributor harmless for any liability
|
||||||
|
incurred by, or claims asserted against, such Contributor by reason
|
||||||
|
of your accepting any such warranty or additional liability.
|
||||||
|
|
||||||
|
END OF TERMS AND CONDITIONS
|
||||||
|
|
||||||
|
APPENDIX: How to apply the Apache License to your work.
|
||||||
|
|
||||||
|
To apply the Apache License to your work, attach the following
|
||||||
|
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||||
|
replaced with your own identifying information. (Don't include
|
||||||
|
the brackets!) The text should be enclosed in the appropriate
|
||||||
|
comment syntax for the file format. We also recommend that a
|
||||||
|
file or class name and description of purpose be included on the
|
||||||
|
same "printed page" as the copyright notice for easier
|
||||||
|
identification within third-party archives.
|
||||||
|
|
||||||
|
Copyright [yyyy] [name of copyright owner]
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
|
@ -0,0 +1,209 @@
|
||||||
|
# Copyright The OpenTelemetry Authors
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
TOOLS_MOD_DIR := ./internal/tools
|
||||||
|
|
||||||
|
ALL_DOCS := $(shell find . -name '*.md' -type f | sort)
|
||||||
|
ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
|
||||||
|
OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS))
|
||||||
|
ALL_COVERAGE_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort)
|
||||||
|
|
||||||
|
GO = go
|
||||||
|
TIMEOUT = 60
|
||||||
|
|
||||||
|
.DEFAULT_GOAL := precommit
|
||||||
|
|
||||||
|
.PHONY: precommit ci
|
||||||
|
precommit: dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default
|
||||||
|
ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage
|
||||||
|
|
||||||
|
# Tools
|
||||||
|
|
||||||
|
TOOLS = $(CURDIR)/.tools
|
||||||
|
|
||||||
|
$(TOOLS):
|
||||||
|
@mkdir -p $@
|
||||||
|
$(TOOLS)/%: | $(TOOLS)
|
||||||
|
cd $(TOOLS_MOD_DIR) && \
|
||||||
|
$(GO) build -o $@ $(PACKAGE)
|
||||||
|
|
||||||
|
MULTIMOD = $(TOOLS)/multimod
|
||||||
|
$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod
|
||||||
|
|
||||||
|
SEMCONVGEN = $(TOOLS)/semconvgen
|
||||||
|
$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
|
||||||
|
|
||||||
|
CROSSLINK = $(TOOLS)/crosslink
|
||||||
|
$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/crosslink
|
||||||
|
|
||||||
|
DBOTCONF = $(TOOLS)/dbotconf
|
||||||
|
$(TOOLS)/dbotconf: PACKAGE=go.opentelemetry.io/build-tools/dbotconf
|
||||||
|
|
||||||
|
GOLANGCI_LINT = $(TOOLS)/golangci-lint
|
||||||
|
$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint
|
||||||
|
|
||||||
|
MISSPELL = $(TOOLS)/misspell
|
||||||
|
$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell
|
||||||
|
|
||||||
|
GOCOVMERGE = $(TOOLS)/gocovmerge
|
||||||
|
$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge
|
||||||
|
|
||||||
|
STRINGER = $(TOOLS)/stringer
|
||||||
|
$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer
|
||||||
|
|
||||||
|
PORTO = $(TOOLS)/porto
|
||||||
|
$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto
|
||||||
|
|
||||||
|
GOJQ = $(TOOLS)/gojq
|
||||||
|
$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq
|
||||||
|
|
||||||
|
.PHONY: tools
|
||||||
|
tools: $(CROSSLINK) $(DBOTCONF) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD)
|
||||||
|
|
||||||
|
# Build
|
||||||
|
|
||||||
|
.PHONY: generate build
|
||||||
|
|
||||||
|
generate: $(OTEL_GO_MOD_DIRS:%=generate/%)
|
||||||
|
generate/%: DIR=$*
|
||||||
|
generate/%: | $(STRINGER) $(PORTO)
|
||||||
|
@echo "$(GO) generate $(DIR)/..." \
|
||||||
|
&& cd $(DIR) \
|
||||||
|
&& PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w .
|
||||||
|
|
||||||
|
build: generate $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%)
|
||||||
|
build/%: DIR=$*
|
||||||
|
build/%:
|
||||||
|
@echo "$(GO) build $(DIR)/..." \
|
||||||
|
&& cd $(DIR) \
|
||||||
|
&& $(GO) build ./...
|
||||||
|
|
||||||
|
build-tests/%: DIR=$*
|
||||||
|
build-tests/%:
|
||||||
|
@echo "$(GO) build tests $(DIR)/..." \
|
||||||
|
&& cd $(DIR) \
|
||||||
|
&& $(GO) list ./... \
|
||||||
|
| grep -v third_party \
|
||||||
|
| xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null
|
||||||
|
|
||||||
|
# Tests
|
||||||
|
|
||||||
|
TEST_TARGETS := test-default test-bench test-short test-verbose test-race
|
||||||
|
.PHONY: $(TEST_TARGETS) test
|
||||||
|
test-default test-race: ARGS=-race
|
||||||
|
test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
|
||||||
|
test-short: ARGS=-short
|
||||||
|
test-verbose: ARGS=-v -race
|
||||||
|
$(TEST_TARGETS): test
|
||||||
|
test: $(OTEL_GO_MOD_DIRS:%=test/%)
|
||||||
|
test/%: DIR=$*
|
||||||
|
test/%:
|
||||||
|
@echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \
|
||||||
|
&& cd $(DIR) \
|
||||||
|
&& $(GO) list ./... \
|
||||||
|
| grep -v third_party \
|
||||||
|
| xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS)
|
||||||
|
|
||||||
|
COVERAGE_MODE = atomic
|
||||||
|
COVERAGE_PROFILE = coverage.out
|
||||||
|
.PHONY: test-coverage
|
||||||
|
test-coverage: | $(GOCOVMERGE)
|
||||||
|
@set -e; \
|
||||||
|
printf "" > coverage.txt; \
|
||||||
|
for dir in $(ALL_COVERAGE_MOD_DIRS); do \
|
||||||
|
echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \
|
||||||
|
(cd "$${dir}" && \
|
||||||
|
$(GO) list ./... \
|
||||||
|
| grep -v third_party \
|
||||||
|
| xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \
|
||||||
|
$(GO) tool cover -html=coverage.out -o coverage.html); \
|
||||||
|
done; \
|
||||||
|
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
|
||||||
|
|
||||||
|
.PHONY: golangci-lint golangci-lint-fix
|
||||||
|
golangci-lint-fix: ARGS=--fix
|
||||||
|
golangci-lint-fix: golangci-lint
|
||||||
|
golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%)
|
||||||
|
golangci-lint/%: DIR=$*
|
||||||
|
golangci-lint/%: | $(GOLANGCI_LINT)
|
||||||
|
@echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \
|
||||||
|
&& cd $(DIR) \
|
||||||
|
&& $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS)
|
||||||
|
|
||||||
|
.PHONY: crosslink
|
||||||
|
crosslink: | $(CROSSLINK)
|
||||||
|
@echo "cross-linking all go modules" \
|
||||||
|
&& $(CROSSLINK)
|
||||||
|
|
||||||
|
.PHONY: go-mod-tidy
|
||||||
|
go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%)
|
||||||
|
go-mod-tidy/%: DIR=$*
|
||||||
|
go-mod-tidy/%: | crosslink
|
||||||
|
@echo "$(GO) mod tidy in $(DIR)" \
|
||||||
|
&& cd $(DIR) \
|
||||||
|
&& $(GO) mod tidy
|
||||||
|
|
||||||
|
.PHONY: lint-modules
|
||||||
|
lint-modules: go-mod-tidy
|
||||||
|
|
||||||
|
.PHONY: lint
|
||||||
|
lint: misspell lint-modules golangci-lint
|
||||||
|
|
||||||
|
.PHONY: vanity-import-check
|
||||||
|
vanity-import-check: | $(PORTO)
|
||||||
|
@$(PORTO) --include-internal -l .
|
||||||
|
|
||||||
|
.PHONY: misspell
|
||||||
|
misspell: | $(MISSPELL)
|
||||||
|
@$(MISSPELL) -w $(ALL_DOCS)
|
||||||
|
|
||||||
|
.PHONY: license-check
|
||||||
|
license-check:
|
||||||
|
@licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*') ; do \
|
||||||
|
awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \
|
||||||
|
done); \
|
||||||
|
if [ -n "$${licRes}" ]; then \
|
||||||
|
echo "license header checking failed:"; echo "$${licRes}"; \
|
||||||
|
exit 1; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
DEPENDABOT_CONFIG = .github/dependabot.yml
|
||||||
|
.PHONY: dependabot-check
|
||||||
|
dependabot-check: | $(DBOTCONF)
|
||||||
|
@$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || echo "(run: make dependabot-generate)"
|
||||||
|
|
||||||
|
.PHONY: dependabot-generate
|
||||||
|
dependabot-generate: | $(DBOTCONF)
|
||||||
|
@$(DBOTCONF) generate > $(DEPENDABOT_CONFIG)
|
||||||
|
|
||||||
|
.PHONY: check-clean-work-tree
|
||||||
|
check-clean-work-tree:
|
||||||
|
@if ! git diff --quiet; then \
|
||||||
|
echo; \
|
||||||
|
echo 'Working tree is not clean, did you forget to run "make precommit"?'; \
|
||||||
|
echo; \
|
||||||
|
git status; \
|
||||||
|
exit 1; \
|
||||||
|
fi
|
||||||
|
|
||||||
|
.PHONY: prerelease
|
||||||
|
prerelease: | $(MULTIMOD)
|
||||||
|
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
|
||||||
|
$(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET}
|
||||||
|
|
||||||
|
COMMIT ?= "HEAD"
|
||||||
|
.PHONY: add-tags
|
||||||
|
add-tags: | $(MULTIMOD)
|
||||||
|
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
|
||||||
|
$(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
|
|
@ -0,0 +1,108 @@
|
||||||
|
# OpenTelemetry-Go
|
||||||
|
|
||||||
|
[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain)
|
||||||
|
[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main)
|
||||||
|
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel)
|
||||||
|
[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel)
|
||||||
|
[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT)
|
||||||
|
|
||||||
|
OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
|
||||||
|
It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms.
|
||||||
|
|
||||||
|
## Project Status
|
||||||
|
|
||||||
|
| Signal | Status | Project |
|
||||||
|
| ------- | ---------- | ------- |
|
||||||
|
| Traces | Stable | N/A |
|
||||||
|
| Metrics | Alpha | N/A |
|
||||||
|
| Logs | Frozen [1] | N/A |
|
||||||
|
|
||||||
|
- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics.
|
||||||
|
No Logs Pull Requests are currently being accepted.
|
||||||
|
|
||||||
|
Progress and status specific to this repository are tracked in our local
|
||||||
|
[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
|
||||||
|
and
|
||||||
|
[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones).
|
||||||
|
|
||||||
|
Project versioning information and stability guarantees can be found in the
|
||||||
|
[versioning documentation](./VERSIONING.md).
|
||||||
|
|
||||||
|
### Compatibility
|
||||||
|
|
||||||
|
OpenTelemetry-Go attempts to track the current supported versions of the
|
||||||
|
[Go language](https://golang.org/doc/devel/release#policy). The release
|
||||||
|
schedule after a new minor version of Go is as follows:

- The first release or one month, whichever is sooner, will add build steps for the new Go version.
- The first release after three months will remove support for the oldest Go version.
|
||||||
|
|
||||||
|
This project is tested on the following systems.
|
||||||
|
|
||||||
|
| OS | Go Version | Architecture |
|
||||||
|
| ------- | ---------- | ------------ |
|
||||||
|
| Ubuntu | 1.18 | amd64 |
|
||||||
|
| Ubuntu | 1.17 | amd64 |
|
||||||
|
| Ubuntu | 1.16 | amd64 |
|
||||||
|
| Ubuntu | 1.18 | 386 |
|
||||||
|
| Ubuntu | 1.17 | 386 |
|
||||||
|
| Ubuntu | 1.16 | 386 |
|
||||||
|
| MacOS | 1.18 | amd64 |
|
||||||
|
| MacOS | 1.17 | amd64 |
|
||||||
|
| MacOS | 1.16 | amd64 |
|
||||||
|
| Windows | 1.18 | amd64 |
|
||||||
|
| Windows | 1.17 | amd64 |
|
||||||
|
| Windows | 1.16 | amd64 |
|
||||||
|
| Windows | 1.18 | 386 |
|
||||||
|
| Windows | 1.17 | 386 |
|
||||||
|
| Windows | 1.16 | 386 |
|
||||||
|
|
||||||
|
While this project should work for other systems, no compatibility guarantees
|
||||||
|
are made for those systems currently.
|
||||||
|
|
||||||
|
Go 1.18 was added in March of 2022.
|
||||||
|
Go 1.16 will be removed around June 2022.
|
||||||
|
|
||||||
|
## Getting Started
|
||||||
|
|
||||||
|
You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/).
|
||||||
|
|
||||||
|
OpenTelemetry's goal is to provide a single set of APIs to capture distributed
|
||||||
|
traces and metrics from your application and send them to an observability
|
||||||
|
platform. This project allows you to do just that for applications written in
|
||||||
|
Go. There are two steps to this process: instrument your application, and
|
||||||
|
configure an exporter.
|
||||||
|
|
||||||
|
### Instrumentation
|
||||||
|
|
||||||
|
To start capturing distributed traces and metric events from your application
|
||||||
|
it first needs to be instrumented. The easiest way to do this is by using an
|
||||||
|
instrumentation library for your code. Be sure to check out [the officially
|
||||||
|
supported instrumentation
|
||||||
|
libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation).
|
||||||
|
|
||||||
|
If you need to extend the telemetry an instrumentation library provides or want
|
||||||
|
to build your own instrumentation for your application directly you will need
|
||||||
|
to use the
|
||||||
|
[Go otel](https://pkg.go.dev/go.opentelemetry.io/otel)
|
||||||
|
package. The included [examples](./example/) are a good way to see some
|
||||||
|
practical uses of this process.
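
For a sense of what direct instrumentation looks like, here is a minimal, self-contained sketch using the `otel` package; the tracer name and the `doWork` helper are illustrative and not taken from the examples directory:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
)

func main() {
	// Acquire a tracer from the global TracerProvider (a no-op provider by
	// default until an SDK is installed) and record a span around some work.
	tracer := otel.Tracer("example/app")
	ctx, span := tracer.Start(context.Background(), "do-work")
	defer span.End()
	doWork(ctx)
}

func doWork(ctx context.Context) {
	// Real work goes here; the context carries the active span so callees
	// can create child spans.
	_ = ctx
}
```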
|
||||||
|
|
||||||
|
### Export
|
||||||
|
|
||||||
|
Now that your application is instrumented to collect telemetry, it needs an
|
||||||
|
export pipeline to send that telemetry to an observability platform.
|
||||||
|
|
||||||
|
All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
|
||||||
|
|
||||||
|
| Exporter | Metrics | Traces |
|
||||||
|
| :-----------------------------------: | :-----: | :----: |
|
||||||
|
| [Jaeger](./exporters/jaeger/) | | ✓ |
|
||||||
|
| [OTLP](./exporters/otlp/) | ✓ | ✓ |
|
||||||
|
| [Prometheus](./exporters/prometheus/) | ✓ | |
|
||||||
|
| [stdout](./exporters/stdout/) | ✓ | ✓ |
|
||||||
|
| [Zipkin](./exporters/zipkin/) | | ✓ |
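
As a hedged sketch of wiring one of these exporters into an export pipeline, the snippet below uses the stdout trace exporter together with the trace SDK; treat the exact option calls as assumptions rather than canonical setup code:

```go
package main

import (
	"context"
	"log"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
	// Create an exporter that writes spans to stdout.
	exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
	if err != nil {
		log.Fatal(err)
	}

	// Install a TracerProvider that batches spans to the exporter.
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	defer func() { _ = tp.Shutdown(context.Background()) }()
	otel.SetTracerProvider(tp)

	// Spans created through otel.Tracer now flow to stdout.
	_, span := otel.Tracer("example").Start(context.Background(), "startup")
	span.End()
}
```

Swapping in another exporter from the table above changes only how `exp` is constructed.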
|
||||||
|
|
||||||
|
## Contributing
|
||||||
|
|
||||||
|
See the [contributing documentation](CONTRIBUTING.md).
|
|
@ -0,0 +1,132 @@
|
||||||
|
# Release Process
|
||||||
|
|
||||||
|
## Semantic Convention Generation
|
||||||
|
|
||||||
|
If a new version of the OpenTelemetry Specification has been released it will be necessary to generate a new
|
||||||
|
semantic convention package from the YAML definitions in the specification repository. There is a `semconvgen` utility
|
||||||
|
installed by `make tools` that can be used to generate a package with the name matching the specification
|
||||||
|
version number under the `semconv` package. This will ideally be done soon after the specification release is
|
||||||
|
tagged. Make sure that the specification repo contains a checkout of the latest tagged release so that the
|
||||||
|
generated files match the released semantic conventions.
|
||||||
|
|
||||||
|
There are currently two categories of semantic conventions that must be generated, `resource` and `trace`.
|
||||||
|
|
||||||
|
```
|
||||||
|
.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/resource -t semconv/template.j2
|
||||||
|
.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/trace -t semconv/template.j2
|
||||||
|
```
|
||||||
|
|
||||||
|
Using default values for all options other than `input` will result in using the `template.j2` template to
|
||||||
|
generate `resource.go` and `trace.go` in `/path/to/otelgo/repo/semconv/<version>`.
|
||||||
|
|
||||||
|
There are several ancillary files that are not generated and should be copied into the new package from the
|
||||||
|
prior package, with updates made as appropriate to canonical import path statements and constant values.
|
||||||
|
These files include:
|
||||||
|
|
||||||
|
* doc.go
|
||||||
|
* exception.go
|
||||||
|
* http(_test)?.go
|
||||||
|
* schema.go
|
||||||
|
|
||||||
|
Uses of the previous schema version in this repository should be updated to use the newly generated version.
|
||||||
|
No tooling for this exists at present, so use find/replace in your editor of choice or craft a `grep | sed`
|
||||||
|
pipeline if you like living on the edge.
|
||||||
|
|
||||||
|
## Pre-Release
|
||||||
|
|
||||||
|
First, decide which module sets will be released and update their versions
|
||||||
|
in `versions.yaml`. Commit this change to a new branch.
|
||||||
|
|
||||||
|
Update go.mod for submodules to depend on the new release, which will happen in the next step.
|
||||||
|
|
||||||
|
1. Run the `prerelease` make target. It creates a branch
|
||||||
|
`prerelease_<module set>_<new tag>` that will contain all release changes.
|
||||||
|
|
||||||
|
```
|
||||||
|
make prerelease MODSET=<module set>
|
||||||
|
```
|
||||||
|
|
||||||
|
2. Verify the changes.
|
||||||
|
|
||||||
|
```
|
||||||
|
git diff ...prerelease_<module set>_<new tag>
|
||||||
|
```
|
||||||
|
|
||||||
|
This should have changed the version for all modules to be `<new tag>`.
|
||||||
|
If these changes look correct, merge them into your pre-release branch:
|
||||||
|
|
||||||
|
```
|
||||||
|
git merge prerelease_<module set>_<new tag>
|
||||||
|
```
|
||||||
|
|
||||||
|
3. Update the [Changelog](./CHANGELOG.md).
|
||||||
|
- Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
|
||||||
|
To verify this, you can look directly at the commits since the `<last tag>`.
|
||||||
|
|
||||||
|
```
|
||||||
|
git --no-pager log --pretty=oneline "<last tag>..HEAD"
|
||||||
|
```
|
||||||
|
|
||||||
|
- Move all the `Unreleased` changes into a new section following the title scheme (`[<new tag>] - <date of release>`).
|
||||||
|
- Update all the appropriate links at the bottom.
|
||||||
|
|
||||||
|
4. Push the changes to upstream and create a Pull Request on GitHub.
|
||||||
|
Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
|
||||||
|
|
||||||
|
## Tag
|
||||||
|
|
||||||
|
Once the Pull Request with all the version changes has been approved and merged it is time to tag the merged commit.
|
||||||
|
|
||||||
|
***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
|
||||||
|
Failure to do so will leave things in a broken state. As long as you do not
|
||||||
|
change `versions.yaml` between pre-release and this step, things should be fine.
|
||||||
|
|
||||||
|
***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
|
||||||
|
It is critical you make sure the version you push upstream is correct.
|
||||||
|
[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
|
||||||
|
|
||||||
|
1. For each module set that will be released, run the `add-tags` make target
|
||||||
|
using the `<commit-hash>` of the commit on the main branch for the merged Pull Request.
|
||||||
|
|
||||||
|
```
|
||||||
|
make add-tags MODSET=<module set> COMMIT=<commit hash>
|
||||||
|
```
|
||||||
|
|
||||||
|
It should only be necessary to provide an explicit `COMMIT` value if the
|
||||||
|
current `HEAD` of your working directory is not the correct commit.
|
||||||
|
|
||||||
|
2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
|
||||||
|
Make sure you push all sub-modules as well.
|
||||||
|
|
||||||
|
```
|
||||||
|
git push upstream <new tag>
|
||||||
|
git push upstream <submodules-path/new tag>
|
||||||
|
...
|
||||||
|
```
|
||||||
|
|
||||||
|
## Release
|
||||||
|
|
||||||
|
Finally create a Release for the new `<new tag>` on GitHub.
|
||||||
|
The release body should include all the release notes from the Changelog for this release.
|
||||||
|
|
||||||
|
## Verify Examples
|
||||||
|
|
||||||
|
After releasing verify that examples build outside of the repository.
|
||||||
|
|
||||||
|
```
|
||||||
|
./verify_examples.sh
|
||||||
|
```
|
||||||
|
|
||||||
|
The script copies examples into a different directory, removes any `replace` declarations in `go.mod`, and builds them.
|
||||||
|
This ensures they build with the published release, not the local copy.
|
||||||
|
|
||||||
|
## Post-Release
|
||||||
|
|
||||||
|
### Contrib Repository
|
||||||
|
|
||||||
|
Once verified be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
|
||||||
|
|
||||||
|
### Website Documentation
|
||||||
|
|
||||||
|
Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/).
|
||||||
|
Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
|
|
@ -0,0 +1,224 @@
|
||||||
|
# Versioning
|
||||||
|
|
||||||
|
This document describes the versioning policy for this repository. This policy
|
||||||
|
is designed so the following goals can be achieved.
|
||||||
|
|
||||||
|
**Users are provided a codebase of value that is stable and secure.**
|
||||||
|
|
||||||
|
## Policy
|
||||||
|
|
||||||
|
* Versioning of this project will be idiomatic of a Go project using [Go
|
||||||
|
modules](https://github.com/golang/go/wiki/Modules).
|
||||||
|
* [Semantic import
|
||||||
|
versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
|
||||||
|
will be used.
|
||||||
|
* Versions will comply with [semver
|
||||||
|
2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions.
|
||||||
|
* New methods may be added to exported API interfaces. All exported
|
||||||
|
interfaces that fall within this exception will include the following
|
||||||
|
paragraph in their public documentation.
|
||||||
|
|
||||||
|
> Warning: methods may be added to this interface in minor releases.
|
||||||
|
|
||||||
|
* If a module is version `v2` or higher, the major version of the module
|
||||||
|
must be included as a `/vN` at the end of the module paths used in
|
||||||
|
`go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require
|
||||||
|
go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path
|
||||||
|
(e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the
|
||||||
|
paths used in `go get` commands (e.g., `go get
|
||||||
|
go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a
|
||||||
|
`@v2.0.1` in that example. One way to think about it is that the module
|
||||||
|
name now includes the `/v2`, so include `/v2` whenever you are using the
|
||||||
|
module name).
|
||||||
|
* If a module is version `v0` or `v1`, do not include the major version in
|
||||||
|
either the module path or the import path.
|
||||||
|
* Modules will be used to encapsulate signals and components.
|
||||||
|
* Experimental modules still under active development will be versioned at
|
||||||
|
`v0` to imply the stability guarantee defined by
|
||||||
|
[semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
|
||||||
|
|
||||||
|
> Major version zero (0.y.z) is for initial development. Anything MAY
|
||||||
|
> change at any time. The public API SHOULD NOT be considered stable.
|
||||||
|
|
||||||
|
* Mature modules for which we guarantee a stable public API will be versioned
|
||||||
|
with a major version greater than `v0`.
|
||||||
|
* The decision to make a module stable will be made on a case-by-case
|
||||||
|
basis by the maintainers of this project.
|
||||||
|
* Experimental modules will start their versioning at `v0.0.0` and will
|
||||||
|
increment their minor version when backwards incompatible changes are
|
||||||
|
released and increment their patch version when backwards compatible
|
||||||
|
changes are released.
|
||||||
|
* All stable modules that use the same major version number will use the
|
||||||
|
same entire version number.
|
||||||
|
* Stable modules may be released with an incremented minor or patch
|
||||||
|
version even though that module has not been changed, but rather so
|
||||||
|
that it will remain at the same version as other stable modules that
|
||||||
|
did undergo change.
|
||||||
|
* When an experimental module becomes stable a new stable module version
|
||||||
|
will be released and will include this now stable module. The new
|
||||||
|
stable module version will be an increment of the minor version number
|
||||||
|
and will be applied to all existing stable modules as well as the newly
|
||||||
|
stable module being released.
|
||||||
|
* Versioning of the associated [contrib
|
||||||
|
repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of
|
||||||
|
this project will be idiomatic of a Go project using [Go
|
||||||
|
modules](https://github.com/golang/go/wiki/Modules).
|
||||||
|
* [Semantic import
|
||||||
|
versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
|
||||||
|
will be used.
|
||||||
|
* Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html).
|
||||||
|
* If a module is version `v2` or higher, the
|
||||||
|
major version of the module must be included as a `/vN` at the end of the
|
||||||
|
module paths used in `go.mod` files (e.g., `module
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/host/v2`, `require
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the
|
||||||
|
package import path (e.g., `import
|
||||||
|
"go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes
|
||||||
|
the paths used in `go get` commands (e.g., `go get
|
||||||
|
go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. Note there
|
||||||
|
is both a `/v2` and a `@v2.0.1` in that example. One way to think about
|
||||||
|
it is that the module name now includes the `/v2`, so include `/v2`
|
||||||
|
whenever you are using the module name).
|
||||||
|
* If a module is version `v0` or `v1`, do not include the major version
|
||||||
|
in either the module path or the import path.
|
||||||
|
* In addition to public APIs, telemetry produced by stable instrumentation
|
||||||
|
will remain stable and backwards compatible. This is to avoid breaking
|
||||||
|
alerts and dashboards.
|
||||||
|
* Modules will be used to encapsulate instrumentation, detectors, exporters,
|
||||||
|
propagators, and any other independent sets of related components.
|
||||||
|
* Experimental modules still under active development will be versioned at
|
||||||
|
`v0` to imply the stability guarantee defined by
|
||||||
|
[semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
|
||||||
|
|
||||||
|
> Major version zero (0.y.z) is for initial development. Anything MAY
|
||||||
|
> change at any time. The public API SHOULD NOT be considered stable.
|
||||||
|
|
||||||
|
* Mature modules for which we guarantee a stable public API and telemetry will
|
||||||
|
be versioned with a major version greater than `v0`.
|
||||||
|
* Experimental modules will start their versioning at `v0.0.0` and will
|
||||||
|
increment their minor version when backwards incompatible changes are
|
||||||
|
released and increment their patch version when backwards compatible
|
||||||
|
changes are released.
|
||||||
|
* Stable contrib modules cannot depend on experimental modules from this
|
||||||
|
project.
|
||||||
|
* All stable contrib modules of the same major version with this project
|
||||||
|
will use the same entire version as this project.
|
||||||
|
* Stable modules may be released with an incremented minor or patch
|
||||||
|
version even though that module's code has not been changed. Instead
|
||||||
|
the only change that will have been included is to have updated that
|
||||||
|
module's dependency on this project's stable APIs.
|
||||||
|
* When an experimental module in contrib becomes stable a new stable
|
||||||
|
module version will be released and will include this now stable
|
||||||
|
module. The new stable module version will be an increment of the minor
|
||||||
|
version number and will be applied to all existing stable contrib
|
||||||
|
modules, this project's modules, and the newly stable module being
|
||||||
|
released.
|
||||||
|
* Contrib modules will be kept up to date with this project's releases.
|
||||||
|
* Due to the dependency contrib modules will implicitly have on this
|
||||||
|
project's modules the release of stable contrib modules to match the
|
||||||
|
released version number will be staggered after this project's release.
|
||||||
|
There is no explicit time guarantee for how long after this project's
|
||||||
|
release the contrib release will be. Effort should be made to keep them
|
||||||
|
as close in time as possible.
|
||||||
|
* No additional stable release in this project can be made until the
|
||||||
|
contrib repository has a matching stable release.
|
||||||
|
* No release can be made in the contrib repository after this project's
|
||||||
|
stable release except for a stable release of the contrib repository.
|
||||||
|
* GitHub releases will be made for all releases.
|
||||||
|
* Go modules will be made available at Go package mirrors.
|
||||||
|
|
||||||
|
## Example Versioning Lifecycle
|
||||||
|
|
||||||
|
To better understand the implementation of the above policy the following
|
||||||
|
example is provided. This project is simplified to include only the following
|
||||||
|
modules and their versions:
|
||||||
|
|
||||||
|
* `otel`: `v0.14.0`
|
||||||
|
* `otel/trace`: `v0.14.0`
|
||||||
|
* `otel/metric`: `v0.14.0`
|
||||||
|
* `otel/baggage`: `v0.14.0`
|
||||||
|
* `otel/sdk/trace`: `v0.14.0`
|
||||||
|
* `otel/sdk/metric`: `v0.14.0`
|
||||||
|
|
||||||
|
These modules have been developed to a point where the `otel/trace`,
|
||||||
|
`otel/baggage`, and `otel/sdk/trace` modules have reached a point that they
|
||||||
|
should be considered for a stable release. The `otel/metric` and
|
||||||
|
`otel/sdk/metric` are still under active development and the `otel` module
|
||||||
|
depends on both `otel/trace` and `otel/metric`.
|
||||||
|
|
||||||
|
The `otel` package is refactored to remove its dependencies on `otel/metric` so
|
||||||
|
it can be released as stable as well. With that done the following release
|
||||||
|
candidates are made:
|
||||||
|
|
||||||
|
* `otel`: `v1.0.0-RC1`
|
||||||
|
* `otel/trace`: `v1.0.0-RC1`
|
||||||
|
* `otel/baggage`: `v1.0.0-RC1`
|
||||||
|
* `otel/sdk/trace`: `v1.0.0-RC1`
|
||||||
|
|
||||||
|
The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
|
||||||
|
|
||||||
|
A few minor issues are discovered in the `otel/trace` package. These issues are
|
||||||
|
resolved with some minor, but backwards incompatible, changes and are released
|
||||||
|
as a second release candidate:
|
||||||
|
|
||||||
|
* `otel`: `v1.0.0-RC2`
|
||||||
|
* `otel/trace`: `v1.0.0-RC2`
|
||||||
|
* `otel/baggage`: `v1.0.0-RC2`
|
||||||
|
* `otel/sdk/trace`: `v1.0.0-RC2`
|
||||||
|
|
||||||
|
Notice that all module version numbers are incremented to adhere to our
|
||||||
|
versioning policy.
|
||||||
|
|
||||||
|
After these release candidates have been evaluated to satisfaction, they are
|
||||||
|
released as version `v1.0.0`.
|
||||||
|
|
||||||
|
* `otel`: `v1.0.0`
|
||||||
|
* `otel/trace`: `v1.0.0`
|
||||||
|
* `otel/baggage`: `v1.0.0`
|
||||||
|
* `otel/sdk/trace`: `v1.0.0`
|
||||||
|
|
||||||
|
Since both the `go` utility and the Go module system support [the semantic
|
||||||
|
versioning definition of
|
||||||
|
precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release
|
||||||
|
will correctly be interpreted as the successor to the previous release
|
||||||
|
candidates.
|
||||||
|
|
||||||
|
Active development of this project continues. The `otel/metric` module now has
|
||||||
|
backwards incompatible changes to its API that need to be released and the
|
||||||
|
`otel/baggage` module has a minor bug fix that needs to be released. The
|
||||||
|
following release is made:
|
||||||
|
|
||||||
|
* `otel`: `v1.0.1`
|
||||||
|
* `otel/trace`: `v1.0.1`
|
||||||
|
* `otel/metric`: `v0.15.0`
|
||||||
|
* `otel/baggage`: `v1.0.1`
|
||||||
|
* `otel/sdk/trace`: `v1.0.1`
|
||||||
|
* `otel/sdk/metric`: `v0.15.0`
|
||||||
|
|
||||||
|
Notice that, again, all stable module versions are incremented in unison and
|
||||||
|
the `otel/sdk/metric` package, which depends on the `otel/metric` package, also
|
||||||
|
bumped its version. This bump of the `otel/sdk/metric` package makes sense
|
||||||
|
given their coupling, though it is not explicitly required by our versioning
|
||||||
|
policy.
|
||||||
|
|
||||||
|
As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a
|
||||||
|
point where they should be evaluated for stability. The `otel` module is
|
||||||
|
reintegrated with the `otel/metric` package and the following release is made:
|
||||||
|
|
||||||
|
* `otel`: `v1.1.0-RC1`
|
||||||
|
* `otel/trace`: `v1.1.0-RC1`
|
||||||
|
* `otel/metric`: `v1.1.0-RC1`
|
||||||
|
* `otel/baggage`: `v1.1.0-RC1`
|
||||||
|
* `otel/sdk/trace`: `v1.1.0-RC1`
|
||||||
|
* `otel/sdk/metric`: `v1.1.0-RC1`
|
||||||
|
|
||||||
|
All the modules are evaluated and determined to be viable for a stable release. They
are then released as version `v1.1.0` (the minor version is incremented to
indicate the addition of a new signal).
|
||||||
|
|
||||||
|
* `otel`: `v1.1.0`
|
||||||
|
* `otel/trace`: `v1.1.0`
|
||||||
|
* `otel/metric`: `v1.1.0`
|
||||||
|
* `otel/baggage`: `v1.1.0`
|
||||||
|
* `otel/sdk/trace`: `v1.1.0`
|
||||||
|
* `otel/sdk/metric`: `v1.1.0`
|
|
@ -0,0 +1,16 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
// Package attribute provides key and value attributes.
|
||||||
|
package attribute // import "go.opentelemetry.io/otel/attribute"
|
|
@ -0,0 +1,150 @@
|
||||||
|
// Copyright The OpenTelemetry Authors
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
package attribute // import "go.opentelemetry.io/otel/attribute"
|
||||||
|
|
||||||
|
import (
|
||||||
|
"bytes"
|
||||||
|
"sync"
|
||||||
|
"sync/atomic"
|
||||||
|
)
|
||||||
|
|
||||||
|
type (
|
||||||
|
// Encoder is a mechanism for serializing a label set into a
|
||||||
|
// specific string representation that supports caching, to
|
||||||
|
// avoid repeated serialization. An example could be an
|
||||||
|
// exporter encoding the label set into a wire representation.
|
||||||
|
Encoder interface {
|
||||||
|
// Encode returns the serialized encoding of the label
|
||||||
|
// set using its Iterator. This result may be cached
|
||||||
|
// by an attribute.Set.
|
||||||
|
Encode(iterator Iterator) string
|
||||||
|
|
||||||
|
// ID returns a value that is unique for each class of
|
||||||
|
// label encoder. Label encoders allocate these using
|
||||||
|
// `NewEncoderID`.
|
||||||
|
ID() EncoderID
|
||||||
|
}
|
||||||
|
|
||||||
|
// EncoderID is used to identify distinct Encoder
|
||||||
|
// implementations, for caching encoded results.
|
||||||
|
EncoderID struct {
|
||||||
|
value uint64
|
||||||
|
}
|
||||||
|
|
||||||
|
// defaultLabelEncoder uses a sync.Pool of buffers to reduce
|
||||||
|
// the number of allocations used in encoding labels. This
|
||||||
|
// implementation encodes a comma-separated list of key=value,
|
||||||
|
// with '/'-escaping of '=', ',', and '\'.
|
||||||
|
defaultLabelEncoder struct {
|
||||||
|
// pool is a pool of labelset builders. The buffers in this
|
||||||
|
// pool grow to a size such that most label encodings will not
|
||||||
|
// allocate new memory.
|
||||||
|
pool sync.Pool // *bytes.Buffer
|
||||||
|
}
|
||||||
|
)
|
||||||
|
|
||||||
|
// escapeChar is used to ensure uniqueness of the label encoding where
|
||||||
|
// keys or values contain either '=' or ','. Since there is no parser
|
||||||
|
// needed for this encoding and its only requirement is to be unique,
|
||||||
|
// this choice is arbitrary. Users will see these in some exporters
|
||||||
|
// (e.g., stdout), so the backslash ('\') is used as a conventional choice.
|
||||||
|
const escapeChar = '\\'
|
||||||
|
|
||||||
|
var (
|
||||||
|
_ Encoder = &defaultLabelEncoder{}
|
||||||
|
|
||||||
|
// encoderIDCounter is for generating IDs for other label
|
||||||
|
// encoders.
|
||||||
|
encoderIDCounter uint64
|
||||||
|
|
||||||
|
defaultEncoderOnce sync.Once
|
||||||
|
defaultEncoderID = NewEncoderID()
|
||||||
|
defaultEncoderInstance *defaultLabelEncoder
|
||||||
|
)
|
||||||
|
|
||||||
|
// NewEncoderID returns a unique label encoder ID. It should be
|
||||||
|
// called once per each type of label encoder. Preferably in init() or
|
||||||
|
// in var definition.
|
||||||
|
func NewEncoderID() EncoderID {
|
||||||
|
return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
|
||||||
|
}
|
||||||
|
|
||||||
|
// DefaultEncoder returns a label encoder that encodes labels
|
||||||
|
// in such a way that each escaped label's key is followed by an equal
|
||||||
|
// sign and then by an escaped label's value. All key-value pairs are
|
||||||
|
// separated by a comma.
|
||||||
|
//
|
||||||
|
// Escaping is done by prepending a backslash before either a
|
||||||
|
// backslash, equal sign or a comma.
|
||||||
|
func DefaultEncoder() Encoder {
|
||||||
|
defaultEncoderOnce.Do(func() {
|
||||||
|
defaultEncoderInstance = &defaultLabelEncoder{
|
||||||
|
pool: sync.Pool{
|
||||||
|
New: func() interface{} {
|
||||||
|
return &bytes.Buffer{}
|
||||||
|
},
|
||||||
|
},
|
||||||
|
}
|
||||||
|
})
|
||||||
|
return defaultEncoderInstance
|
||||||
|
}
|
||||||
|
|
||||||
|
// Encode is a part of an implementation of the LabelEncoder
|
||||||
|
// interface.
|
||||||
|
func (d *defaultLabelEncoder) Encode(iter Iterator) string {
|
||||||
|
buf := d.pool.Get().(*bytes.Buffer)
|
||||||
|
defer d.pool.Put(buf)
|
||||||
|
buf.Reset()
|
||||||
|
|
||||||
|
for iter.Next() {
|
||||||
|
i, keyValue := iter.IndexedLabel()
|
||||||
|
if i > 0 {
|
||||||
|
_, _ = buf.WriteRune(',')
|
||||||
|
}
|
||||||
|
copyAndEscape(buf, string(keyValue.Key))
|
||||||
|
|
||||||
|
_, _ = buf.WriteRune('=')
|
||||||
|
|
||||||
|
if keyValue.Value.Type() == STRING {
|
||||||
|
copyAndEscape(buf, keyValue.Value.AsString())
|
||||||
|
} else {
|
||||||
|
_, _ = buf.WriteString(keyValue.Value.Emit())
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return buf.String()
|
||||||
|
}
|
||||||
|
|
||||||
|
// ID is a part of an implementation of the LabelEncoder interface.
|
||||||
|
func (*defaultLabelEncoder) ID() EncoderID {
|
||||||
|
return defaultEncoderID
|
||||||
|
}
|
||||||
|
|
||||||
|
// copyAndEscape escapes `=`, `,` and its own escape character (`\`),
|
||||||
|
// making the default encoding unique.
|
||||||
|
func copyAndEscape(buf *bytes.Buffer, val string) {
|
||||||
|
for _, ch := range val {
|
||||||
|
switch ch {
|
||||||
|
case '=', ',', escapeChar:
|
||||||
|
buf.WriteRune(escapeChar)
|
||||||
|
}
|
||||||
|
buf.WriteRune(ch)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// Valid returns true if this encoder ID was allocated by
|
||||||
|
// `NewEncoderID`. Invalid encoder IDs will not be cached.
|
||||||
|
func (id EncoderID) Valid() bool {
|
||||||
|
return id.value != 0
|
||||||
|
}
|
|
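A minimal usage sketch of the encoder above (illustrative only, not part of the vendored file; it assumes the vendored go.opentelemetry.io/otel/attribute package is importable from the application):

// Illustrative example: DefaultEncoder escapes '=', ',' and '\' and joins
// the sorted key-value pairs with commas.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	set := attribute.NewSet(
		attribute.String("key=1", "a,b"),
		attribute.Int("count", 2),
	)
	// Keys are emitted in sorted order; special characters inside keys and
	// string values are backslash-escaped.
	fmt.Println(set.Encoded(attribute.DefaultEncoder()))
}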
@@ -0,0 +1,143 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package attribute // import "go.opentelemetry.io/otel/attribute"

// Iterator allows iterating over the set of labels in order,
// sorted by key.
type Iterator struct {
	storage *Set
	idx     int
}

// MergeIterator supports iterating over two sets of labels while
// eliminating duplicate values from the combined set. The first
// iterator value takes precedence.
type MergeIterator struct {
	one     oneIterator
	two     oneIterator
	current KeyValue
}

type oneIterator struct {
	iter  Iterator
	done  bool
	label KeyValue
}

// Next moves the iterator to the next position. Returns false if there
// are no more labels.
func (i *Iterator) Next() bool {
	i.idx++
	return i.idx < i.Len()
}

// Label returns current KeyValue. Must be called only after Next returns
// true.
func (i *Iterator) Label() KeyValue {
	kv, _ := i.storage.Get(i.idx)
	return kv
}

// Attribute is a synonym for Label().
func (i *Iterator) Attribute() KeyValue {
	return i.Label()
}

// IndexedLabel returns current index and attribute. Must be called only
// after Next returns true.
func (i *Iterator) IndexedLabel() (int, KeyValue) {
	return i.idx, i.Label()
}

// Len returns a number of labels in the iterator's `*Set`.
func (i *Iterator) Len() int {
	return i.storage.Len()
}

// ToSlice is a convenience function that creates a slice of labels
// from the passed iterator. The iterator is set up to start from the
// beginning before creating the slice.
func (i *Iterator) ToSlice() []KeyValue {
	l := i.Len()
	if l == 0 {
		return nil
	}
	i.idx = -1
	slice := make([]KeyValue, 0, l)
	for i.Next() {
		slice = append(slice, i.Label())
	}
	return slice
}

// NewMergeIterator returns a MergeIterator for merging two label sets.
// Duplicates are resolved by taking the value from the first set.
func NewMergeIterator(s1, s2 *Set) MergeIterator {
	mi := MergeIterator{
		one: makeOne(s1.Iter()),
		two: makeOne(s2.Iter()),
	}
	return mi
}

func makeOne(iter Iterator) oneIterator {
	oi := oneIterator{
		iter: iter,
	}
	oi.advance()
	return oi
}

func (oi *oneIterator) advance() {
	if oi.done = !oi.iter.Next(); !oi.done {
		oi.label = oi.iter.Label()
	}
}

// Next returns true if there is another label available.
func (m *MergeIterator) Next() bool {
	if m.one.done && m.two.done {
		return false
	}
	if m.one.done {
		m.current = m.two.label
		m.two.advance()
		return true
	}
	if m.two.done {
		m.current = m.one.label
		m.one.advance()
		return true
	}
	if m.one.label.Key == m.two.label.Key {
		m.current = m.one.label // first iterator label value wins
		m.one.advance()
		m.two.advance()
		return true
	}
	if m.one.label.Key < m.two.label.Key {
		m.current = m.one.label
		m.one.advance()
		return true
	}
	m.current = m.two.label
	m.two.advance()
	return true
}

// Label returns the current value after Next() returns true.
func (m *MergeIterator) Label() KeyValue {
	return m.current
}
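A short sketch of the merge semantics above (illustrative only, not part of the vendored file; names are made up for the example):

// Illustrative example: merging two label sets with NewMergeIterator.
// When both sets define the same key, the first set's value wins.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	first := attribute.NewSet(attribute.String("env", "prod"), attribute.Int("shard", 1))
	second := attribute.NewSet(attribute.String("env", "dev"), attribute.String("zone", "us-east"))

	iter := attribute.NewMergeIterator(&first, &second)
	for iter.Next() {
		kv := iter.Label()
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.Emit()) // "env" comes from `first`
	}
}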
@@ -0,0 +1,134 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package attribute // import "go.opentelemetry.io/otel/attribute"

// Key represents the key part in key-value pairs. It's a string. The
// allowed character set in the key depends on the use of the key.
type Key string

// Bool creates a KeyValue instance with a BOOL Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Bool(name, value).
func (k Key) Bool(v bool) KeyValue {
	return KeyValue{
		Key:   k,
		Value: BoolValue(v),
	}
}

// BoolSlice creates a KeyValue instance with a BOOLSLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- BoolSlice(name, value).
func (k Key) BoolSlice(v []bool) KeyValue {
	return KeyValue{
		Key:   k,
		Value: BoolSliceValue(v),
	}
}

// Int creates a KeyValue instance with an INT64 Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Int(name, value).
func (k Key) Int(v int) KeyValue {
	return KeyValue{
		Key:   k,
		Value: IntValue(v),
	}
}

// IntSlice creates a KeyValue instance with an INT64SLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- IntSlice(name, value).
func (k Key) IntSlice(v []int) KeyValue {
	return KeyValue{
		Key:   k,
		Value: IntSliceValue(v),
	}
}

// Int64 creates a KeyValue instance with an INT64 Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Int64(name, value).
func (k Key) Int64(v int64) KeyValue {
	return KeyValue{
		Key:   k,
		Value: Int64Value(v),
	}
}

// Int64Slice creates a KeyValue instance with an INT64SLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Int64Slice(name, value).
func (k Key) Int64Slice(v []int64) KeyValue {
	return KeyValue{
		Key:   k,
		Value: Int64SliceValue(v),
	}
}

// Float64 creates a KeyValue instance with a FLOAT64 Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Float64(name, value).
func (k Key) Float64(v float64) KeyValue {
	return KeyValue{
		Key:   k,
		Value: Float64Value(v),
	}
}

// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- Float64Slice(name, value).
func (k Key) Float64Slice(v []float64) KeyValue {
	return KeyValue{
		Key:   k,
		Value: Float64SliceValue(v),
	}
}

// String creates a KeyValue instance with a STRING Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- String(name, value).
func (k Key) String(v string) KeyValue {
	return KeyValue{
		Key:   k,
		Value: StringValue(v),
	}
}

// StringSlice creates a KeyValue instance with a STRINGSLICE Value.
//
// If creating both a key and value at the same time, use the provided
// convenience function instead -- StringSlice(name, value).
func (k Key) StringSlice(v []string) KeyValue {
	return KeyValue{
		Key:   k,
		Value: StringSliceValue(v),
	}
}

// Defined returns true for non-empty keys.
func (k Key) Defined() bool {
	return len(k) != 0
}
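A small sketch of the Key type in use (illustrative only, not part of the vendored file; the attribute name is made up):

// Illustrative example: a typed Key can be declared once and reused to
// build KeyValues for the same attribute name.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

const httpStatusKey = attribute.Key("http.status_code")

func main() {
	kv := httpStatusKey.Int(200)
	fmt.Println(kv.Key, kv.Value.Emit(), httpStatusKey.Defined()) // http.status_code 200 true
}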
@@ -0,0 +1,86 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package attribute // import "go.opentelemetry.io/otel/attribute"

import (
	"fmt"
)

// KeyValue holds a key and value pair.
type KeyValue struct {
	Key   Key
	Value Value
}

// Valid returns if kv is a valid OpenTelemetry attribute.
func (kv KeyValue) Valid() bool {
	return kv.Key.Defined() && kv.Value.Type() != INVALID
}

// Bool creates a KeyValue with a BOOL Value type.
func Bool(k string, v bool) KeyValue {
	return Key(k).Bool(v)
}

// BoolSlice creates a KeyValue with a BOOLSLICE Value type.
func BoolSlice(k string, v []bool) KeyValue {
	return Key(k).BoolSlice(v)
}

// Int creates a KeyValue with an INT64 Value type.
func Int(k string, v int) KeyValue {
	return Key(k).Int(v)
}

// IntSlice creates a KeyValue with an INT64SLICE Value type.
func IntSlice(k string, v []int) KeyValue {
	return Key(k).IntSlice(v)
}

// Int64 creates a KeyValue with an INT64 Value type.
func Int64(k string, v int64) KeyValue {
	return Key(k).Int64(v)
}

// Int64Slice creates a KeyValue with an INT64SLICE Value type.
func Int64Slice(k string, v []int64) KeyValue {
	return Key(k).Int64Slice(v)
}

// Float64 creates a KeyValue with a FLOAT64 Value type.
func Float64(k string, v float64) KeyValue {
	return Key(k).Float64(v)
}

// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type.
func Float64Slice(k string, v []float64) KeyValue {
	return Key(k).Float64Slice(v)
}

// String creates a KeyValue with a STRING Value type.
func String(k, v string) KeyValue {
	return Key(k).String(v)
}

// StringSlice creates a KeyValue with a STRINGSLICE Value type.
func StringSlice(k string, v []string) KeyValue {
	return Key(k).StringSlice(v)
}

// Stringer creates a new key-value pair with a passed name and a string
// value generated by the passed Stringer interface.
func Stringer(k string, v fmt.Stringer) KeyValue {
	return Key(k).String(v.String())
}
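A sketch of the package-level constructors above (illustrative only, not part of the vendored file; the attribute names are made up):

// Illustrative example: the package-level constructors mirror the Key
// methods, and Valid reports whether a KeyValue has both a key and a
// typed value.
package main

import (
	"fmt"
	"time"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	kvs := []attribute.KeyValue{
		attribute.Bool("cache.hit", true),
		attribute.StringSlice("peers", []string{"a", "b"}),
		attribute.Stringer("elapsed", 1500*time.Millisecond), // any fmt.Stringer works
	}
	for _, kv := range kvs {
		fmt.Println(kv.Valid(), kv.Key, kv.Value.Emit())
	}
	fmt.Println(attribute.KeyValue{}.Valid()) // false: empty key, INVALID value
}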
@@ -0,0 +1,435 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package attribute // import "go.opentelemetry.io/otel/attribute"

import (
	"encoding/json"
	"reflect"
	"sort"
)

type (
	// Set is the representation for a distinct label set. It
	// manages an immutable set of labels, with an internal cache
	// for storing label encodings.
	//
	// This type supports the `Equivalent` method of comparison
	// using values of type `Distinct`.
	//
	// This type is used to implement:
	// 1. Metric labels
	// 2. Resource sets
	// 3. Correlation map (TODO)
	Set struct {
		equivalent Distinct
	}

	// Distinct wraps a variable-size array of `KeyValue`,
	// constructed with keys in sorted order. This can be used as
	// a map key or for equality checking between Sets.
	Distinct struct {
		iface interface{}
	}

	// Filter supports removing certain labels from label sets.
	// When the filter returns true, the label will be kept in
	// the filtered label set. When the filter returns false, the
	// label is excluded from the filtered label set, and the
	// label instead appears in the `removed` list of excluded labels.
	Filter func(KeyValue) bool

	// Sortable implements `sort.Interface`, used for sorting
	// `KeyValue`. This is an exported type to support a
	// memory optimization. A pointer to one of these is needed
	// for the call to `sort.Stable()`, which the caller may
	// provide in order to avoid an allocation. See
	// `NewSetWithSortable()`.
	Sortable []KeyValue
)

var (
	// keyValueType is used in `computeDistinctReflect`.
	keyValueType = reflect.TypeOf(KeyValue{})

	// emptySet is returned for empty label sets.
	emptySet = &Set{
		equivalent: Distinct{
			iface: [0]KeyValue{},
		},
	}
)

// EmptySet returns a reference to a Set with no elements.
//
// This is a convenience provided for optimized calling utility.
func EmptySet() *Set {
	return emptySet
}

// reflect abbreviates `reflect.ValueOf`.
func (d Distinct) reflect() reflect.Value {
	return reflect.ValueOf(d.iface)
}

// Valid returns true if this value refers to a valid `*Set`.
func (d Distinct) Valid() bool {
	return d.iface != nil
}

// Len returns the number of labels in this set.
func (l *Set) Len() int {
	if l == nil || !l.equivalent.Valid() {
		return 0
	}
	return l.equivalent.reflect().Len()
}

// Get returns the KeyValue at ordered position `idx` in this set.
func (l *Set) Get(idx int) (KeyValue, bool) {
	if l == nil {
		return KeyValue{}, false
	}
	value := l.equivalent.reflect()

	if idx >= 0 && idx < value.Len() {
		// Note: The Go compiler successfully avoids an allocation for
		// the interface{} conversion here:
		return value.Index(idx).Interface().(KeyValue), true
	}

	return KeyValue{}, false
}

// Value returns the value of a specified key in this set.
func (l *Set) Value(k Key) (Value, bool) {
	if l == nil {
		return Value{}, false
	}
	rValue := l.equivalent.reflect()
	vlen := rValue.Len()

	idx := sort.Search(vlen, func(idx int) bool {
		return rValue.Index(idx).Interface().(KeyValue).Key >= k
	})
	if idx >= vlen {
		return Value{}, false
	}
	keyValue := rValue.Index(idx).Interface().(KeyValue)
	if k == keyValue.Key {
		return keyValue.Value, true
	}
	return Value{}, false
}

// HasValue tests whether a key is defined in this set.
func (l *Set) HasValue(k Key) bool {
	if l == nil {
		return false
	}
	_, ok := l.Value(k)
	return ok
}

// Iter returns an iterator for visiting the labels in this set.
func (l *Set) Iter() Iterator {
	return Iterator{
		storage: l,
		idx:     -1,
	}
}

// ToSlice returns the set of labels belonging to this set, sorted,
// where keys appear no more than once.
func (l *Set) ToSlice() []KeyValue {
	iter := l.Iter()
	return iter.ToSlice()
}

// Equivalent returns a value that may be used as a map key. The
// Distinct type guarantees that the result will equal the equivalent
// Distinct value of any label set with the same elements as this,
// where sets are made unique by choosing the last value in the input
// for any given key.
func (l *Set) Equivalent() Distinct {
	if l == nil || !l.equivalent.Valid() {
		return emptySet.equivalent
	}
	return l.equivalent
}

// Equals returns true if the argument set is equivalent to this set.
func (l *Set) Equals(o *Set) bool {
	return l.Equivalent() == o.Equivalent()
}

// Encoded returns the encoded form of this set, according to
// `encoder`.
func (l *Set) Encoded(encoder Encoder) string {
	if l == nil || encoder == nil {
		return ""
	}

	return encoder.Encode(l.Iter())
}

func empty() Set {
	return Set{
		equivalent: emptySet.equivalent,
	}
}

// NewSet returns a new `Set`. See the documentation for
// `NewSetWithSortableFiltered` for more details.
//
// Except for empty sets, this method adds an additional allocation
// compared with calls that include a `*Sortable`.
func NewSet(kvs ...KeyValue) Set {
	// Check for empty set.
	if len(kvs) == 0 {
		return empty()
	}
	s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil)
	return s
}

// NewSetWithSortable returns a new `Set`. See the documentation for
// `NewSetWithSortableFiltered` for more details.
//
// This call includes a `*Sortable` option as a memory optimization.
func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set {
	// Check for empty set.
	if len(kvs) == 0 {
		return empty()
	}
	s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
	return s
}

// NewSetWithFiltered returns a new `Set`. See the documentation for
// `NewSetWithSortableFiltered` for more details.
//
// This call includes a `Filter` to include/exclude label keys from
// the return value. Excluded keys are returned as a slice of label
// values.
func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
	// Check for empty set.
	if len(kvs) == 0 {
		return empty(), nil
	}
	return NewSetWithSortableFiltered(kvs, new(Sortable), filter)
}

// NewSetWithSortableFiltered returns a new `Set`.
//
// Duplicate keys are eliminated by taking the last value. This
// re-orders the input slice so that unique last-values are contiguous
// at the end of the slice.
//
// This ensures the following:
//
// - Last-value-wins semantics
// - Caller sees the reordering, but doesn't lose values
// - Repeated calls preserve last-value wins.
//
// Note that methods are defined on `*Set`, although this returns `Set`.
// Callers can avoid memory allocations by:
//
// - allocating a `Sortable` for use as a temporary in this method
// - allocating a `Set` for storing the return value of this
//   constructor.
//
// The result maintains a cache of encoded labels, by attribute.EncoderID.
// This value should not be copied after its first use.
//
// The second `[]KeyValue` return value is a list of labels that were
// excluded by the Filter (if non-nil).
func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
	// Check for empty set.
	if len(kvs) == 0 {
		return empty(), nil
	}

	*tmp = kvs

	// Stable sort so the following de-duplication can implement
	// last-value-wins semantics.
	sort.Stable(tmp)

	*tmp = nil

	position := len(kvs) - 1
	offset := position - 1

	// The requirements stated above require that the stable
	// result be placed in the end of the input slice, while
	// overwritten values are swapped to the beginning.
	//
	// De-duplicate with last-value-wins semantics. Preserve
	// duplicate values at the beginning of the input slice.
	for ; offset >= 0; offset-- {
		if kvs[offset].Key == kvs[position].Key {
			continue
		}
		position--
		kvs[offset], kvs[position] = kvs[position], kvs[offset]
	}
	if filter != nil {
		return filterSet(kvs[position:], filter)
	}
	return Set{
		equivalent: computeDistinct(kvs[position:]),
	}, nil
}

// filterSet reorders `kvs` so that included keys are contiguous at
// the end of the slice, while excluded keys precede the included keys.
func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
	var excluded []KeyValue

	// Move labels that do not match the filter so
	// they're adjacent before calling computeDistinct().
	distinctPosition := len(kvs)

	// Swap indistinct keys forward and distinct keys toward the
	// end of the slice.
	offset := len(kvs) - 1
	for ; offset >= 0; offset-- {
		if filter(kvs[offset]) {
			distinctPosition--
			kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset]
			continue
		}
	}
	excluded = kvs[:distinctPosition]

	return Set{
		equivalent: computeDistinct(kvs[distinctPosition:]),
	}, excluded
}

// Filter returns a filtered copy of this `Set`. See the
// documentation for `NewSetWithSortableFiltered` for more details.
func (l *Set) Filter(re Filter) (Set, []KeyValue) {
	if re == nil {
		return Set{
			equivalent: l.equivalent,
		}, nil
	}

	// Note: This could be refactored to avoid the temporary slice
	// allocation, if it proves to be expensive.
	return filterSet(l.ToSlice(), re)
}

// computeDistinct returns a `Distinct` using either the fixed- or
// reflect-oriented code path, depending on the size of the input.
// The input slice is assumed to already be sorted and de-duplicated.
func computeDistinct(kvs []KeyValue) Distinct {
	iface := computeDistinctFixed(kvs)
	if iface == nil {
		iface = computeDistinctReflect(kvs)
	}
	return Distinct{
		iface: iface,
	}
}

// computeDistinctFixed computes a `Distinct` for small slices. It
// returns nil if the input is too large for this code path.
func computeDistinctFixed(kvs []KeyValue) interface{} {
	switch len(kvs) {
	case 1:
		ptr := new([1]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 2:
		ptr := new([2]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 3:
		ptr := new([3]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 4:
		ptr := new([4]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 5:
		ptr := new([5]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 6:
		ptr := new([6]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 7:
		ptr := new([7]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 8:
		ptr := new([8]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 9:
		ptr := new([9]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	case 10:
		ptr := new([10]KeyValue)
		copy((*ptr)[:], kvs)
		return *ptr
	default:
		return nil
	}
}

// computeDistinctReflect computes a `Distinct` using reflection,
// works for any size input.
func computeDistinctReflect(kvs []KeyValue) interface{} {
	at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
	for i, keyValue := range kvs {
		*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
	}
	return at.Interface()
}

// MarshalJSON returns the JSON encoding of the `*Set`.
func (l *Set) MarshalJSON() ([]byte, error) {
	return json.Marshal(l.equivalent.iface)
}

// MarshalLog is the marshaling function used by the logging system to represent this exporter.
func (l Set) MarshalLog() interface{} {
	kvs := make(map[string]string)
	for _, kv := range l.ToSlice() {
		kvs[string(kv.Key)] = kv.Value.Emit()
	}
	return kvs
}

// Len implements `sort.Interface`.
func (l *Sortable) Len() int {
	return len(*l)
}

// Swap implements `sort.Interface`.
func (l *Sortable) Swap(i, j int) {
	(*l)[i], (*l)[j] = (*l)[j], (*l)[i]
}

// Less implements `sort.Interface`.
func (l *Sortable) Less(i, j int) bool {
	return (*l)[i].Key < (*l)[j].Key
}
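A sketch of the last-value-wins deduplication and filtering implemented above (illustrative only, not part of the vendored file; the key names are made up):

// Illustrative example: building a Set from a slice with a duplicate key
// (the later value wins) while filtering one key out; excluded labels are
// returned separately.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	kvs := []attribute.KeyValue{
		attribute.String("host", "a"),
		attribute.String("host", "b"), // duplicate key: this later value is kept
		attribute.Int("port", 443),
	}

	set, removed := attribute.NewSetWithFiltered(kvs, func(kv attribute.KeyValue) bool {
		return kv.Key != "port" // return true to keep a label in the Set
	})

	fmt.Println(set.Encoded(attribute.DefaultEncoder())) // host=b
	fmt.Println(len(removed))                            // 1: the excluded "port" label
}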
@@ -0,0 +1,31 @@
// Code generated by "stringer -type=Type"; DO NOT EDIT.

package attribute

import "strconv"

func _() {
	// An "invalid array index" compiler error signifies that the constant values have changed.
	// Re-run the stringer command to generate them again.
	var x [1]struct{}
	_ = x[INVALID-0]
	_ = x[BOOL-1]
	_ = x[INT64-2]
	_ = x[FLOAT64-3]
	_ = x[STRING-4]
	_ = x[BOOLSLICE-5]
	_ = x[INT64SLICE-6]
	_ = x[FLOAT64SLICE-7]
	_ = x[STRINGSLICE-8]
}

const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE"

var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}

func (i Type) String() string {
	if i < 0 || i >= Type(len(_Type_index)-1) {
		return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
	}
	return _Type_name[_Type_index[i]:_Type_index[i+1]]
}
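A tiny sketch of the generated stringer in use (illustrative only, not part of the generated file); the names it produces are what the Value JSON encoding in the next hunk uses for its "Type" field:

// Illustrative example: Type implements fmt.Stringer via the generated code.
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	v := attribute.Int64Value(42)
	fmt.Println(v.Type().String()) // INT64
	fmt.Println(attribute.INVALID) // INVALID
}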
@@ -0,0 +1,271 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package attribute // import "go.opentelemetry.io/otel/attribute"

import (
	"encoding/json"
	"fmt"
	"strconv"

	"go.opentelemetry.io/otel/internal"
)

//go:generate stringer -type=Type

// Type describes the type of the data Value holds.
type Type int

// Value represents the value part in key-value pairs.
type Value struct {
	vtype    Type
	numeric  uint64
	stringly string
	slice    interface{}
}

const (
	// INVALID is used for a Value with no value set.
	INVALID Type = iota
	// BOOL is a boolean Type Value.
	BOOL
	// INT64 is a 64-bit signed integral Type Value.
	INT64
	// FLOAT64 is a 64-bit floating point Type Value.
	FLOAT64
	// STRING is a string Type Value.
	STRING
	// BOOLSLICE is a slice of booleans Type Value.
	BOOLSLICE
	// INT64SLICE is a slice of 64-bit signed integral numbers Type Value.
	INT64SLICE
	// FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value.
	FLOAT64SLICE
	// STRINGSLICE is a slice of strings Type Value.
	STRINGSLICE
)

// BoolValue creates a BOOL Value.
func BoolValue(v bool) Value {
	return Value{
		vtype:   BOOL,
		numeric: internal.BoolToRaw(v),
	}
}

// BoolSliceValue creates a BOOLSLICE Value.
func BoolSliceValue(v []bool) Value {
	cp := make([]bool, len(v))
	copy(cp, v)
	return Value{
		vtype: BOOLSLICE,
		slice: &cp,
	}
}

// IntValue creates an INT64 Value.
func IntValue(v int) Value {
	return Int64Value(int64(v))
}

// IntSliceValue creates an INT64SLICE Value.
func IntSliceValue(v []int) Value {
	cp := make([]int64, 0, len(v))
	for _, i := range v {
		cp = append(cp, int64(i))
	}
	return Value{
		vtype: INT64SLICE,
		slice: &cp,
	}
}

// Int64Value creates an INT64 Value.
func Int64Value(v int64) Value {
	return Value{
		vtype:   INT64,
		numeric: internal.Int64ToRaw(v),
	}
}

// Int64SliceValue creates an INT64SLICE Value.
func Int64SliceValue(v []int64) Value {
	cp := make([]int64, len(v))
	copy(cp, v)
	return Value{
		vtype: INT64SLICE,
		slice: &cp,
	}
}

// Float64Value creates a FLOAT64 Value.
func Float64Value(v float64) Value {
	return Value{
		vtype:   FLOAT64,
		numeric: internal.Float64ToRaw(v),
	}
}

// Float64SliceValue creates a FLOAT64SLICE Value.
func Float64SliceValue(v []float64) Value {
	cp := make([]float64, len(v))
	copy(cp, v)
	return Value{
		vtype: FLOAT64SLICE,
		slice: &cp,
	}
}

// StringValue creates a STRING Value.
func StringValue(v string) Value {
	return Value{
		vtype:    STRING,
		stringly: v,
	}
}

// StringSliceValue creates a STRINGSLICE Value.
func StringSliceValue(v []string) Value {
	cp := make([]string, len(v))
	copy(cp, v)
	return Value{
		vtype: STRINGSLICE,
		slice: &cp,
	}
}

// Type returns a type of the Value.
func (v Value) Type() Type {
	return v.vtype
}

// AsBool returns the bool value. Make sure that the Value's type is
// BOOL.
func (v Value) AsBool() bool {
	return internal.RawToBool(v.numeric)
}

// AsBoolSlice returns the []bool value. Make sure that the Value's type is
// BOOLSLICE.
func (v Value) AsBoolSlice() []bool {
	if s, ok := v.slice.(*[]bool); ok {
		return *s
	}
	return nil
}

// AsInt64 returns the int64 value. Make sure that the Value's type is
// INT64.
func (v Value) AsInt64() int64 {
	return internal.RawToInt64(v.numeric)
}

// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
// INT64SLICE.
func (v Value) AsInt64Slice() []int64 {
	if s, ok := v.slice.(*[]int64); ok {
		return *s
	}
	return nil
}

// AsFloat64 returns the float64 value. Make sure that the Value's
// type is FLOAT64.
func (v Value) AsFloat64() float64 {
	return internal.RawToFloat64(v.numeric)
}

// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
// FLOAT64SLICE.
func (v Value) AsFloat64Slice() []float64 {
	if s, ok := v.slice.(*[]float64); ok {
		return *s
	}
	return nil
}

// AsString returns the string value. Make sure that the Value's type
// is STRING.
func (v Value) AsString() string {
	return v.stringly
}

// AsStringSlice returns the []string value. Make sure that the Value's type is
// STRINGSLICE.
func (v Value) AsStringSlice() []string {
	if s, ok := v.slice.(*[]string); ok {
		return *s
	}
	return nil
}

type unknownValueType struct{}

// AsInterface returns Value's data as interface{}.
func (v Value) AsInterface() interface{} {
	switch v.Type() {
	case BOOL:
		return v.AsBool()
	case BOOLSLICE:
		return v.AsBoolSlice()
	case INT64:
		return v.AsInt64()
	case INT64SLICE:
		return v.AsInt64Slice()
	case FLOAT64:
		return v.AsFloat64()
	case FLOAT64SLICE:
		return v.AsFloat64Slice()
	case STRING:
		return v.stringly
	case STRINGSLICE:
		return v.AsStringSlice()
	}
	return unknownValueType{}
}

// Emit returns a string representation of Value's data.
func (v Value) Emit() string {
	switch v.Type() {
	case BOOLSLICE:
		return fmt.Sprint(*(v.slice.(*[]bool)))
	case BOOL:
		return strconv.FormatBool(v.AsBool())
	case INT64SLICE:
		return fmt.Sprint(*(v.slice.(*[]int64)))
	case INT64:
		return strconv.FormatInt(v.AsInt64(), 10)
	case FLOAT64SLICE:
		return fmt.Sprint(*(v.slice.(*[]float64)))
	case FLOAT64:
		return fmt.Sprint(v.AsFloat64())
	case STRINGSLICE:
		return fmt.Sprint(*(v.slice.(*[]string)))
	case STRING:
		return v.stringly
	default:
		return "unknown"
	}
}

// MarshalJSON returns the JSON encoding of the Value.
func (v Value) MarshalJSON() ([]byte, error) {
	var jsonVal struct {
		Type  string
		Value interface{}
	}
	jsonVal.Type = v.Type().String()
	jsonVal.Value = v.AsInterface()
	return json.Marshal(jsonVal)
}
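A sketch of the tagged-union behaviour above (illustrative only, not part of the vendored file): a Value carries its dynamic type, so Emit, AsInterface and MarshalJSON can render it without the caller tracking the type.

// Illustrative example: rendering Values of different types.
package main

import (
	"encoding/json"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	vals := []attribute.Value{
		attribute.BoolValue(true),
		attribute.Float64SliceValue([]float64{1.5, 2.5}),
		attribute.StringValue("hello"),
	}
	for _, v := range vals {
		out, _ := json.Marshal(v) // uses Value.MarshalJSON: {"Type":"...","Value":...}
		fmt.Printf("%-12s %-12s %s\n", v.Type(), v.Emit(), out)
	}
}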
@@ -0,0 +1,556 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package baggage // import "go.opentelemetry.io/otel/baggage"

import (
	"errors"
	"fmt"
	"net/url"
	"regexp"
	"strings"

	"go.opentelemetry.io/otel/internal/baggage"
)

const (
	maxMembers               = 180
	maxBytesPerMembers       = 4096
	maxBytesPerBaggageString = 8192

	listDelimiter     = ","
	keyValueDelimiter = "="
	propertyDelimiter = ";"

	keyDef      = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)`
	valueDef    = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)`
	keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*`
)

var (
	keyRe      = regexp.MustCompile(`^` + keyDef + `$`)
	valueRe    = regexp.MustCompile(`^` + valueDef + `$`)
	propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`)
)

var (
	errInvalidKey      = errors.New("invalid key")
	errInvalidValue    = errors.New("invalid value")
	errInvalidProperty = errors.New("invalid baggage list-member property")
	errInvalidMember   = errors.New("invalid baggage list-member")
	errMemberNumber    = errors.New("too many list-members in baggage-string")
	errMemberBytes     = errors.New("list-member too large")
	errBaggageBytes    = errors.New("baggage-string too large")
)

// Property is an additional metadata entry for a baggage list-member.
type Property struct {
	key, value string

	// hasValue indicates if a zero-value value means the property does not
	// have a value or if it was the zero-value.
	hasValue bool

	// hasData indicates whether the created property contains data or not.
	// Properties that do not contain data are invalid with no other check
	// required.
	hasData bool
}

func NewKeyProperty(key string) (Property, error) {
	if !keyRe.MatchString(key) {
		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
	}

	p := Property{key: key, hasData: true}
	return p, nil
}

func NewKeyValueProperty(key, value string) (Property, error) {
	if !keyRe.MatchString(key) {
		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
	}
	if !valueRe.MatchString(value) {
		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
	}

	p := Property{
		key:      key,
		value:    value,
		hasValue: true,
		hasData:  true,
	}
	return p, nil
}

func newInvalidProperty() Property {
	return Property{}
}

// parseProperty attempts to decode a Property from the passed string. It
// returns an error if the input is invalid according to the W3C Baggage
// specification.
func parseProperty(property string) (Property, error) {
	if property == "" {
		return newInvalidProperty(), nil
	}

	match := propertyRe.FindStringSubmatch(property)
	if len(match) != 4 {
		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
	}

	p := Property{hasData: true}
	if match[1] != "" {
		p.key = match[1]
	} else {
		p.key = match[2]
		p.value = match[3]
		p.hasValue = true
	}

	return p, nil
}

// validate ensures p conforms to the W3C Baggage specification, returning an
// error otherwise.
func (p Property) validate() error {
	errFunc := func(err error) error {
		return fmt.Errorf("invalid property: %w", err)
	}

	if !p.hasData {
		return errFunc(fmt.Errorf("%w: %q", errInvalidProperty, p))
	}

	if !keyRe.MatchString(p.key) {
		return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
	}
	if p.hasValue && !valueRe.MatchString(p.value) {
		return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
	}
	if !p.hasValue && p.value != "" {
		return errFunc(errors.New("inconsistent value"))
	}
	return nil
}

// Key returns the Property key.
func (p Property) Key() string {
	return p.key
}

// Value returns the Property value. Additionally, a boolean is returned
// indicating whether the Property has a value at all, so an empty value can
// be distinguished from an unset one.
func (p Property) Value() (string, bool) {
	return p.value, p.hasValue
}

// String encodes Property into a string compliant with the W3C Baggage
// specification.
func (p Property) String() string {
	if p.hasValue {
		return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value)
	}
	return p.key
}

type properties []Property

func fromInternalProperties(iProps []baggage.Property) properties {
	if len(iProps) == 0 {
		return nil
	}

	props := make(properties, len(iProps))
	for i, p := range iProps {
		props[i] = Property{
			key:      p.Key,
			value:    p.Value,
			hasValue: p.HasValue,
		}
	}
	return props
}

func (p properties) asInternal() []baggage.Property {
	if len(p) == 0 {
		return nil
	}

	iProps := make([]baggage.Property, len(p))
	for i, prop := range p {
		iProps[i] = baggage.Property{
			Key:      prop.key,
			Value:    prop.value,
			HasValue: prop.hasValue,
		}
	}
	return iProps
}

func (p properties) Copy() properties {
	if len(p) == 0 {
		return nil
	}

	props := make(properties, len(p))
	copy(props, p)
	return props
}

// validate ensures each Property in p conforms to the W3C Baggage
// specification, returning an error otherwise.
func (p properties) validate() error {
	for _, prop := range p {
		if err := prop.validate(); err != nil {
			return err
		}
	}
	return nil
}

// String encodes properties into a string compliant with the W3C Baggage
// specification.
func (p properties) String() string {
	props := make([]string, len(p))
	for i, prop := range p {
		props[i] = prop.String()
	}
	return strings.Join(props, propertyDelimiter)
}

// Member is a list-member of a baggage-string as defined by the W3C Baggage
// specification.
type Member struct {
	key, value string
	properties properties

	// hasData indicates whether the created property contains data or not.
	// Properties that do not contain data are invalid with no other check
	// required.
	hasData bool
}

// NewMember returns a new Member from the passed arguments. An error is
// returned if the created Member would be invalid according to the W3C
// Baggage specification.
func NewMember(key, value string, props ...Property) (Member, error) {
	m := Member{
		key:        key,
		value:      value,
		properties: properties(props).Copy(),
		hasData:    true,
	}
	if err := m.validate(); err != nil {
		return newInvalidMember(), err
	}

	return m, nil
}

func newInvalidMember() Member {
	return Member{}
}

// parseMember attempts to decode a Member from the passed string. It returns
// an error if the input is invalid according to the W3C Baggage
// specification.
func parseMember(member string) (Member, error) {
	if n := len(member); n > maxBytesPerMembers {
		return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
	}

	var (
		key, value string
		props      properties
	)

	parts := strings.SplitN(member, propertyDelimiter, 2)
	switch len(parts) {
	case 2:
		// Parse the member properties.
		for _, pStr := range strings.Split(parts[1], propertyDelimiter) {
			p, err := parseProperty(pStr)
			if err != nil {
				return newInvalidMember(), err
			}
			props = append(props, p)
		}
		fallthrough
	case 1:
		// Parse the member key/value pair.

		// Take into account a value can contain equal signs (=).
		kv := strings.SplitN(parts[0], keyValueDelimiter, 2)
		if len(kv) != 2 {
			return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member)
		}
		// "Leading and trailing whitespaces are allowed but MUST be trimmed
		// when converting the header into a data structure."
		key = strings.TrimSpace(kv[0])
		var err error
		value, err = url.QueryUnescape(strings.TrimSpace(kv[1]))
		if err != nil {
			return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
		}
		if !keyRe.MatchString(key) {
			return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
		}
		if !valueRe.MatchString(value) {
			return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
		}
	default:
		// This should never happen unless a developer has changed the string
		// splitting somehow. Panic instead of failing silently and allowing
		// the bug to slip past the CI checks.
		panic("failed to parse baggage member")
	}

	return Member{key: key, value: value, properties: props, hasData: true}, nil
}

// validate ensures m conforms to the W3C Baggage specification, returning an
// error otherwise.
func (m Member) validate() error {
	if !m.hasData {
		return fmt.Errorf("%w: %q", errInvalidMember, m)
	}

	if !keyRe.MatchString(m.key) {
		return fmt.Errorf("%w: %q", errInvalidKey, m.key)
	}
	if !valueRe.MatchString(m.value) {
		return fmt.Errorf("%w: %q", errInvalidValue, m.value)
	}
	return m.properties.validate()
}

// Key returns the Member key.
func (m Member) Key() string { return m.key }

// Value returns the Member value.
func (m Member) Value() string { return m.value }

// Properties returns a copy of the Member properties.
func (m Member) Properties() []Property { return m.properties.Copy() }

// String encodes Member into a string compliant with the W3C Baggage
// specification.
func (m Member) String() string {
	// A key is just an ASCII string, but a value is URL encoded UTF-8.
	s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value))
	if len(m.properties) > 0 {
		s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
	}
	return s
}

// Baggage is a list of baggage members representing the baggage-string as
// defined by the W3C Baggage specification.
type Baggage struct { //nolint:golint
	list baggage.List
}

// New returns a new valid Baggage. It returns an error if it results in a
// Baggage exceeding limits set in that specification.
//
// It expects all the provided members to have already been validated.
func New(members ...Member) (Baggage, error) {
	if len(members) == 0 {
		return Baggage{}, nil
	}

	b := make(baggage.List)
	for _, m := range members {
		if !m.hasData {
			return Baggage{}, errInvalidMember
		}

		// OpenTelemetry resolves duplicates by last-one-wins.
		b[m.key] = baggage.Item{
			Value:      m.value,
			Properties: m.properties.asInternal(),
		}
	}

	// Check member numbers after deduplicating.
	if len(b) > maxMembers {
		return Baggage{}, errMemberNumber
	}

	bag := Baggage{b}
	if n := len(bag.String()); n > maxBytesPerBaggageString {
		return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
	}

	return bag, nil
}

// Parse attempts to decode a baggage-string from the passed string. It
// returns an error if the input is invalid according to the W3C Baggage
// specification.
//
// If there are duplicate list-members contained in baggage, the last one
// defined (reading left-to-right) will be the only one kept. This diverges
// from the W3C Baggage specification which allows duplicate list-members, but
// conforms to the OpenTelemetry Baggage specification.
func Parse(bStr string) (Baggage, error) {
	if bStr == "" {
		return Baggage{}, nil
	}

	if n := len(bStr); n > maxBytesPerBaggageString {
		return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
	}

	b := make(baggage.List)
	for _, memberStr := range strings.Split(bStr, listDelimiter) {
		m, err := parseMember(memberStr)
		if err != nil {
			return Baggage{}, err
		}
		// OpenTelemetry resolves duplicates by last-one-wins.
		b[m.key] = baggage.Item{
			Value:      m.value,
			Properties: m.properties.asInternal(),
		}
	}

	// OpenTelemetry does not allow for duplicate list-members, but the W3C
	// specification does. Now that we have deduplicated, ensure the baggage
	// does not exceed list-member limits.
	if len(b) > maxMembers {
		return Baggage{}, errMemberNumber
	}

	return Baggage{b}, nil
}

// Member returns the baggage list-member identified by key.
//
// If there is no list-member matching the passed key the returned Member will
// be a zero-value Member.
// The returned member is not validated, as we assume the validation happened
// when it was added to the Baggage.
func (b Baggage) Member(key string) Member {
	v, ok := b.list[key]
	if !ok {
		// We do not need to worry about distinguishing between the situation
		// where a zero-valued Member is included in the Baggage because a
		// zero-valued Member is invalid according to the W3C Baggage
		// specification (it has an empty key).
		return newInvalidMember()
	}

	return Member{
		key:        key,
		value:      v.Value,
		properties: fromInternalProperties(v.Properties),
	}
}

// Members returns all the baggage list-members.
// The order of the returned list-members does not have significance.
//
// The returned members are not validated, as we assume the validation happened
// when they were added to the Baggage.
func (b Baggage) Members() []Member {
	if len(b.list) == 0 {
		return nil
	}

	members := make([]Member, 0, len(b.list))
	for k, v := range b.list {
		members = append(members, Member{
			key:        k,
			value:      v.Value,
			properties: fromInternalProperties(v.Properties),
		})
	}
	return members
}

// SetMember returns a copy of the Baggage with the member included. If the
// baggage contains a Member with the same key the existing Member is
// replaced.
//
// If member is invalid according to the W3C Baggage specification, an error
// is returned with the original Baggage.
func (b Baggage) SetMember(member Member) (Baggage, error) {
	if !member.hasData {
		return b, errInvalidMember
	}

	n := len(b.list)
	if _, ok := b.list[member.key]; !ok {
		n++
	}
	list := make(baggage.List, n)

	for k, v := range b.list {
		// Do not copy if we are just going to overwrite.
		if k == member.key {
			continue
		}
		list[k] = v
	}

	list[member.key] = baggage.Item{
		Value:      member.value,
		Properties: member.properties.asInternal(),
	}

	return Baggage{list: list}, nil
}

// DeleteMember returns a copy of the Baggage with the list-member identified
// by key removed.
func (b Baggage) DeleteMember(key string) Baggage {
	n := len(b.list)
	if _, ok := b.list[key]; ok {
		n--
	}
	list := make(baggage.List, n)

	for k, v := range b.list {
		if k == key {
			continue
		}
		list[k] = v
	}

	return Baggage{list: list}
}

// Len returns the number of list-members in the Baggage.
func (b Baggage) Len() int {
	return len(b.list)
}

// String encodes Baggage into a string compliant with the W3C Baggage
// specification. The returned string will be invalid if the Baggage contains
// any invalid list-members.
func (b Baggage) String() string {
	members := make([]string, 0, len(b.list))
	for k, v := range b.list {
		members = append(members, Member{
			key:   k,
			value: v.Value,
|
properties: fromInternalProperties(v.Properties),
|
||||||
|
}.String())
|
||||||
|
}
|
||||||
|
return strings.Join(members, listDelimiter)
|
||||||
|
}
|
|
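For reference, a minimal sketch of how this Baggage API fits together from a caller's perspective; the member keys and values ("userID", "env", etc.) are illustrative only:

```go
package main

import (
    "fmt"

    "go.opentelemetry.io/otel/baggage"
)

func main() {
    // Build a member and a Baggage from it; both are validated on construction.
    m, err := baggage.NewMember("userID", "42")
    if err != nil {
        panic(err)
    }
    bag, err := baggage.New(m)
    if err != nil {
        panic(err)
    }

    // Parse accepts a W3C baggage-string; the last duplicate key wins.
    parsed, err := baggage.Parse("env=staging,env=prod")
    if err != nil {
        panic(err)
    }
    fmt.Println(parsed.Member("env").Value()) // prod

    // SetMember returns a copy; the original Baggage is left unchanged.
    bag, _ = bag.SetMember(parsed.Member("env"))
    fmt.Println(bag.String()) // e.g. "userID=42,env=prod" (member order is not guaranteed)
}
```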
@@ -0,0 +1,39 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package baggage // import "go.opentelemetry.io/otel/baggage"

import (
    "context"

    "go.opentelemetry.io/otel/internal/baggage"
)

// ContextWithBaggage returns a copy of parent with baggage.
func ContextWithBaggage(parent context.Context, b Baggage) context.Context {
    // Delegate so any hooks for the OpenTracing bridge are handled.
    return baggage.ContextWithList(parent, b.list)
}

// ContextWithoutBaggage returns a copy of parent with no baggage.
func ContextWithoutBaggage(parent context.Context) context.Context {
    // Delegate so any hooks for the OpenTracing bridge are handled.
    return baggage.ContextWithList(parent, nil)
}

// FromContext returns the baggage contained in ctx.
func FromContext(ctx context.Context) Baggage {
    // Delegate so any hooks for the OpenTracing bridge are handled.
    return Baggage{list: baggage.ListFromContext(ctx)}
}
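A short sketch of carrying Baggage through a context, which is how instrumented code typically reads it; the member key "requestID" is illustrative:

```go
package main

import (
    "context"
    "fmt"

    "go.opentelemetry.io/otel/baggage"
)

func main() {
    m, _ := baggage.NewMember("requestID", "abc123")
    bag, _ := baggage.New(m)

    // Attach the baggage to a context and read it back further down the call chain.
    ctx := baggage.ContextWithBaggage(context.Background(), bag)
    fmt.Println(baggage.FromContext(ctx).Member("requestID").Value()) // abc123

    // ContextWithoutBaggage strips it again.
    ctx = baggage.ContextWithoutBaggage(ctx)
    fmt.Println(baggage.FromContext(ctx).Len()) // 0
}
```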
@@ -0,0 +1,20 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package baggage provides functionality for storing and retrieving
baggage items in Go context. For propagating the baggage, see the
go.opentelemetry.io/otel/propagation package.
*/
package baggage // import "go.opentelemetry.io/otel/baggage"
@@ -0,0 +1,106 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package codes // import "go.opentelemetry.io/otel/codes"

import (
    "encoding/json"
    "fmt"
    "strconv"
)

const (
    // Unset is the default status code.
    Unset Code = 0
    // Error indicates the operation contains an error.
    Error Code = 1
    // Ok indicates the operation has been validated by an application
    // developer or operator to have completed successfully, or to contain no error.
    Ok Code = 2

    maxCode = 3
)

// Code is a 32-bit representation of a status state.
type Code uint32

var codeToStr = map[Code]string{
    Unset: "Unset",
    Error: "Error",
    Ok:    "Ok",
}

var strToCode = map[string]Code{
    `"Unset"`: Unset,
    `"Error"`: Error,
    `"Ok"`:    Ok,
}

// String returns the Code as a string.
func (c Code) String() string {
    return codeToStr[c]
}

// UnmarshalJSON unmarshals b into the Code.
//
// This is based on the functionality in the gRPC codes package:
// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244
func (c *Code) UnmarshalJSON(b []byte) error {
    // From json.Unmarshaler: By convention, to approximate the behavior of
    // Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
    // a no-op.
    if string(b) == "null" {
        return nil
    }
    if c == nil {
        return fmt.Errorf("nil receiver passed to UnmarshalJSON")
    }

    var x interface{}
    if err := json.Unmarshal(b, &x); err != nil {
        return err
    }
    switch x.(type) {
    case string:
        if jc, ok := strToCode[string(b)]; ok {
            *c = jc
            return nil
        }
        return fmt.Errorf("invalid code: %q", string(b))
    case float64:
        if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil {
            if ci >= maxCode {
                return fmt.Errorf("invalid code: %q", ci)
            }

            *c = Code(ci)
            return nil
        }
        return fmt.Errorf("invalid code: %q", string(b))
    default:
        return fmt.Errorf("invalid code: %q", string(b))
    }
}

// MarshalJSON returns c as the JSON encoding of c.
func (c *Code) MarshalJSON() ([]byte, error) {
    if c == nil {
        return []byte("null"), nil
    }
    str, ok := codeToStr[*c]
    if !ok {
        return nil, fmt.Errorf("invalid code: %d", *c)
    }
    return []byte(fmt.Sprintf("%q", str)), nil
}
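A quick sanity check of the JSON round-trip behavior defined above; note that MarshalJSON is declared on the pointer receiver, so an addressable Code has to be passed to json.Marshal for the custom encoding to apply:

```go
package main

import (
    "encoding/json"
    "fmt"

    "go.opentelemetry.io/otel/codes"
)

func main() {
    // Marshal through the pointer so the custom MarshalJSON is used.
    c := codes.Ok
    out, _ := json.Marshal(&c)
    fmt.Println(string(out)) // "Ok"

    // Both the string and the numeric wire forms unmarshal to the same Code.
    var fromStr, fromNum codes.Code
    _ = json.Unmarshal([]byte(`"Error"`), &fromStr)
    _ = json.Unmarshal([]byte(`1`), &fromNum)
    fmt.Println(fromStr == codes.Error, fromNum == codes.Error) // true true
}
```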
@@ -0,0 +1,21 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package codes defines the canonical error codes used by OpenTelemetry.

It conforms to [the OpenTelemetry
specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode).
*/
package codes // import "go.opentelemetry.io/otel/codes"
@@ -0,0 +1,34 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*
Package otel provides global access to the OpenTelemetry API. The subpackages of
the otel package provide an implementation of the OpenTelemetry API.

The provided API is used to instrument code and measure data about that code's
performance and operation. The measured data, by default, is not processed or
transmitted anywhere. An implementation of the OpenTelemetry SDK, like the
default SDK implementation (go.opentelemetry.io/otel/sdk), and associated
exporters are used to process and transport this data.

To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/.

To read more about tracing, see go.opentelemetry.io/otel/trace.

To read more about metrics, see go.opentelemetry.io/otel/metric.

To read more about propagation, see go.opentelemetry.io/otel/propagation and
go.opentelemetry.io/otel/baggage.
*/
package otel // import "go.opentelemetry.io/otel"
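As a small illustration of the global entry points this package exposes (the tracer name is made up; without an SDK-backed provider registered, the returned tracer is a no-op, so instrumented code is safe to run with telemetry disabled):

```go
package main

import (
    "context"

    "go.opentelemetry.io/otel"
)

func main() {
    // Obtain a tracer from the globally registered TracerProvider.
    tracer := otel.Tracer("cloudflared/example")

    // Start a span and end it when the work is done; pass ctx onward so
    // child spans nest under this one.
    ctx, span := tracer.Start(context.Background(), "do-work")
    defer span.End()
    _ = ctx
}
```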
@@ -0,0 +1,38 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package otel // import "go.opentelemetry.io/otel"

// ErrorHandler handles irremediable events.
type ErrorHandler interface {
    // DO NOT CHANGE: any modification will not be backwards compatible and
    // must never be done outside of a new major release.

    // Handle handles any error deemed irremediable by an OpenTelemetry
    // component.
    Handle(error)
    // DO NOT CHANGE: any modification will not be backwards compatible and
    // must never be done outside of a new major release.
}

// ErrorHandlerFunc is a convenience adapter to allow the use of a function
// as an ErrorHandler.
type ErrorHandlerFunc func(error)

var _ ErrorHandler = ErrorHandlerFunc(nil)

// Handle handles the irremediable error by calling the ErrorHandlerFunc itself.
func (f ErrorHandlerFunc) Handle(err error) {
    f(err)
}
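ErrorHandlerFunc is typically used to plug a plain function into the global error handler; a minimal sketch (the log message is illustrative):

```go
package main

import (
    "errors"
    "log"

    "go.opentelemetry.io/otel"
)

func main() {
    // Route OTel-internal errors (e.g. export failures) to the standard logger.
    otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
        log.Printf("opentelemetry error: %v", err)
    }))

    // Components report through otel.Handle, which invokes the handler above.
    otel.Handle(errors.New("export failed"))
}
```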
@@ -0,0 +1,201 @@
                                 Apache License
                           Version 2.0, January 2004
                        http://www.apache.org/licenses/

   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION

   1. Definitions.

      "License" shall mean the terms and conditions for use, reproduction,
      and distribution as defined by Sections 1 through 9 of this document.

      "Licensor" shall mean the copyright owner or entity authorized by
      the copyright owner that is granting the License.

      "Legal Entity" shall mean the union of the acting entity and all
      other entities that control, are controlled by, or are under common
      control with that entity. For the purposes of this definition,
      "control" means (i) the power, direct or indirect, to cause the
      direction or management of such entity, whether by contract or
      otherwise, or (ii) ownership of fifty percent (50%) or more of the
      outstanding shares, or (iii) beneficial ownership of such entity.

      "You" (or "Your") shall mean an individual or Legal Entity
      exercising permissions granted by this License.

      "Source" form shall mean the preferred form for making modifications,
      including but not limited to software source code, documentation
      source, and configuration files.

      "Object" form shall mean any form resulting from mechanical
      transformation or translation of a Source form, including but
      not limited to compiled object code, generated documentation,
      and conversions to other media types.

      "Work" shall mean the work of authorship, whether in Source or
      Object form, made available under the License, as indicated by a
      copyright notice that is included in or attached to the work
      (an example is provided in the Appendix below).

      "Derivative Works" shall mean any work, whether in Source or Object
      form, that is based on (or derived from) the Work and for which the
      editorial revisions, annotations, elaborations, or other modifications
      represent, as a whole, an original work of authorship. For the purposes
      of this License, Derivative Works shall not include works that remain
      separable from, or merely link (or bind by name) to the interfaces of,
      the Work and Derivative Works thereof.

      "Contribution" shall mean any work of authorship, including
      the original version of the Work and any modifications or additions
      to that Work or Derivative Works thereof, that is intentionally
      submitted to Licensor for inclusion in the Work by the copyright owner
      or by an individual or Legal Entity authorized to submit on behalf of
      the copyright owner. For the purposes of this definition, "submitted"
      means any form of electronic, verbal, or written communication sent
      to the Licensor or its representatives, including but not limited to
      communication on electronic mailing lists, source code control systems,
      and issue tracking systems that are managed by, or on behalf of, the
      Licensor for the purpose of discussing and improving the Work, but
      excluding communication that is conspicuously marked or otherwise
      designated in writing by the copyright owner as "Not a Contribution."

      "Contributor" shall mean Licensor and any individual or Legal Entity
      on behalf of whom a Contribution has been received by Licensor and
      subsequently incorporated within the Work.

   2. Grant of Copyright License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      copyright license to reproduce, prepare Derivative Works of,
      publicly display, publicly perform, sublicense, and distribute the
      Work and such Derivative Works in Source or Object form.

   3. Grant of Patent License. Subject to the terms and conditions of
      this License, each Contributor hereby grants to You a perpetual,
      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
      (except as stated in this section) patent license to make, have made,
      use, offer to sell, sell, import, and otherwise transfer the Work,
      where such license applies only to those patent claims licensable
      by such Contributor that are necessarily infringed by their
      Contribution(s) alone or by combination of their Contribution(s)
      with the Work to which such Contribution(s) was submitted. If You
      institute patent litigation against any entity (including a
      cross-claim or counterclaim in a lawsuit) alleging that the Work
      or a Contribution incorporated within the Work constitutes direct
      or contributory patent infringement, then any patent licenses
      granted to You under this License for that Work shall terminate
      as of the date such litigation is filed.

   4. Redistribution. You may reproduce and distribute copies of the
      Work or Derivative Works thereof in any medium, with or without
      modifications, and in Source or Object form, provided that You
      meet the following conditions:

      (a) You must give any other recipients of the Work or
          Derivative Works a copy of this License; and

      (b) You must cause any modified files to carry prominent notices
          stating that You changed the files; and

      (c) You must retain, in the Source form of any Derivative Works
          that You distribute, all copyright, patent, trademark, and
          attribution notices from the Source form of the Work,
          excluding those notices that do not pertain to any part of
          the Derivative Works; and

      (d) If the Work includes a "NOTICE" text file as part of its
          distribution, then any Derivative Works that You distribute must
          include a readable copy of the attribution notices contained
          within such NOTICE file, excluding those notices that do not
          pertain to any part of the Derivative Works, in at least one
          of the following places: within a NOTICE text file distributed
          as part of the Derivative Works; within the Source form or
          documentation, if provided along with the Derivative Works; or,
          within a display generated by the Derivative Works, if and
          wherever such third-party notices normally appear. The contents
          of the NOTICE file are for informational purposes only and
          do not modify the License. You may add Your own attribution
          notices within Derivative Works that You distribute, alongside
          or as an addendum to the NOTICE text from the Work, provided
          that such additional attribution notices cannot be construed
          as modifying the License.

      You may add Your own copyright statement to Your modifications and
      may provide additional or different license terms and conditions
      for use, reproduction, or distribution of Your modifications, or
      for any such Derivative Works as a whole, provided Your use,
      reproduction, and distribution of the Work otherwise complies with
      the conditions stated in this License.

   5. Submission of Contributions. Unless You explicitly state otherwise,
      any Contribution intentionally submitted for inclusion in the Work
      by You to the Licensor shall be under the terms and conditions of
      this License, without any additional terms or conditions.
      Notwithstanding the above, nothing herein shall supersede or modify
      the terms of any separate license agreement you may have executed
      with Licensor regarding such Contributions.

   6. Trademarks. This License does not grant permission to use the trade
      names, trademarks, service marks, or product names of the Licensor,
      except as required for reasonable and customary use in describing the
      origin of the Work and reproducing the content of the NOTICE file.

   7. Disclaimer of Warranty. Unless required by applicable law or
      agreed to in writing, Licensor provides the Work (and each
      Contributor provides its Contributions) on an "AS IS" BASIS,
      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
      implied, including, without limitation, any warranties or conditions
      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
      PARTICULAR PURPOSE. You are solely responsible for determining the
      appropriateness of using or redistributing the Work and assume any
      risks associated with Your exercise of permissions under this License.

   8. Limitation of Liability. In no event and under no legal theory,
      whether in tort (including negligence), contract, or otherwise,
      unless required by applicable law (such as deliberate and grossly
      negligent acts) or agreed to in writing, shall any Contributor be
      liable to You for damages, including any direct, indirect, special,
      incidental, or consequential damages of any character arising as a
      result of this License or out of the use or inability to use the
      Work (including but not limited to damages for loss of goodwill,
      work stoppage, computer failure or malfunction, or any and all
      other commercial damages or losses), even if such Contributor
      has been advised of the possibility of such damages.

   9. Accepting Warranty or Additional Liability. While redistributing
      the Work or Derivative Works thereof, You may choose to offer,
      and charge a fee for, acceptance of support, warranty, indemnity,
      or other liability obligations and/or rights consistent with this
      License. However, in accepting such obligations, You may act only
      on Your own behalf and on Your sole responsibility, not on behalf
      of any other Contributor, and only if You agree to indemnify,
      defend, and hold each Contributor harmless for any liability
      incurred by, or claims asserted against, such Contributor by reason
      of your accepting any such warranty or additional liability.

   END OF TERMS AND CONDITIONS

   APPENDIX: How to apply the Apache License to your work.

      To apply the Apache License to your work, attach the following
      boilerplate notice, with the fields enclosed by brackets "[]"
      replaced with your own identifying information. (Don't include
      the brackets!) The text should be enclosed in the appropriate
      comment syntax for the file format. We also recommend that a
      file or class name and description of purpose be included on the
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
51 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md generated vendored Normal file
@@ -0,0 +1,51 @@
# OpenTelemetry-Go OTLP Span Exporter

[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)

[OpenTelemetry Protocol Exporter](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.5.0/specification/protocol/exporter.md) implementation.

## Installation

```
go get -u go.opentelemetry.io/otel/exporters/otlp/otlptrace
```

## Examples

- [Exporter setup and examples](./otlptracehttp/example_test.go)
- [Full example sending telemetry to a local collector](../../../example/otel-collector)

## [`otlptrace`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)

The `otlptrace` package provides an exporter implementing the OTel span exporter interface.
This exporter is configured using a client satisfying the `otlptrace.Client` interface.
This client handles the transformation of data into wire format and the transmission of that data to the collector.

## [`otlptracegrpc`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)

The `otlptracegrpc` package implements a client for the span exporter that sends trace telemetry data to the collector using gRPC with protobuf-encoded payloads.

## [`otlptracehttp`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp)

The `otlptracehttp` package implements a client for the span exporter that sends trace telemetry data to the collector using HTTP with protobuf-encoded payloads.

## Configuration

### Environment Variables

The following environment variables can be used (instead of options objects) to
override the default configuration. For more information about how each of
these environment variables is interpreted, see [the OpenTelemetry
specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md).

| Environment variable                                                      | Option                        | Default value                                             |
| ------------------------------------------------------------------------ | ----------------------------- | --------------------------------------------------------- |
| `OTEL_EXPORTER_OTLP_ENDPOINT` `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`        | `WithEndpoint` `WithInsecure` | `https://localhost:4317` or `https://localhost:4318`[^1]  |
| `OTEL_EXPORTER_OTLP_CERTIFICATE` `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`  | `WithTLSClientConfig`         |                                                            |
| `OTEL_EXPORTER_OTLP_HEADERS` `OTEL_EXPORTER_OTLP_TRACES_HEADERS`          | `WithHeaders`                 |                                                            |
| `OTEL_EXPORTER_OTLP_COMPRESSION` `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`  | `WithCompression`             |                                                            |
| `OTEL_EXPORTER_OTLP_TIMEOUT` `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`          | `WithTimeout`                 | `10s`                                                      |

[^1]: The gRPC client defaults to `https://localhost:4317` and the HTTP client to `https://localhost:4318`.

Configuration using options has precedence over the environment variables.
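As a hedged sketch of how these options map onto code (the endpoint and insecure settings below are illustrative, mirroring what `OTEL_EXPORTER_OTLP_ENDPOINT` would configure):

```go
package main

import (
    "context"
    "log"

    "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp"
)

func main() {
    ctx := context.Background()

    // Roughly equivalent to OTEL_EXPORTER_OTLP_ENDPOINT=http://localhost:4318
    // for the HTTP client.
    client := otlptracehttp.NewClient(
        otlptracehttp.WithEndpoint("localhost:4318"),
        otlptracehttp.WithInsecure(),
    )

    exp, err := otlptrace.New(ctx, client)
    if err != nil {
        log.Fatalf("creating OTLP trace exporter: %v", err)
    }
    defer func() { _ = exp.Shutdown(ctx) }()
}
```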
54 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go generated vendored Normal file
@@ -0,0 +1,54 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"

import (
    "context"

    tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)

// Client manages connections to the collector, handles the
// transformation of data into wire format, and the transmission of that
// data to the collector.
type Client interface {
    // DO NOT CHANGE: any modification will not be backwards compatible and
    // must never be done outside of a new major release.

    // Start should establish connection(s) to endpoint(s). It is
    // called just once by the exporter, so the implementation
    // does not need to worry about idempotence and locking.
    Start(ctx context.Context) error
    // DO NOT CHANGE: any modification will not be backwards compatible and
    // must never be done outside of a new major release.

    // Stop should close the connections. The function is called
    // only once by the exporter, so the implementation does not
    // need to worry about idempotence, but it may be called
    // concurrently with UploadTraces, so proper
    // locking is required. The function serves as a
    // synchronization point - after the function returns, the
    // process of closing connections is assumed to be finished.
    Stop(ctx context.Context) error
    // DO NOT CHANGE: any modification will not be backwards compatible and
    // must never be done outside of a new major release.

    // UploadTraces should transform the passed traces to the wire
    // format and send it to the collector. May be called
    // concurrently.
    UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error
    // DO NOT CHANGE: any modification will not be backwards compatible and
    // must never be done outside of a new major release.
}
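Since the goal of this commit is an in-memory OTLP exporter, here is a hedged sketch of what a custom `Client` can look like when it only buffers spans in memory. The package and type names are made up for illustration; this is not the client cloudflared actually ships:

```go
package memclient

import (
    "context"
    "sync"

    tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
)

// InMemoryClient satisfies otlptrace.Client by holding exported spans in a
// slice instead of sending them over the network.
type InMemoryClient struct {
    mu    sync.Mutex
    spans []*tracepb.ResourceSpans
}

// Start is a no-op: there is no connection to establish.
func (c *InMemoryClient) Start(_ context.Context) error { return nil }

// Stop is a no-op: there is no connection to tear down.
func (c *InMemoryClient) Stop(_ context.Context) error { return nil }

// UploadTraces appends the protobuf spans under a lock, since the exporter
// may call it concurrently with Stop.
func (c *InMemoryClient) UploadTraces(_ context.Context, protoSpans []*tracepb.ResourceSpans) error {
    c.mu.Lock()
    defer c.mu.Unlock()
    c.spans = append(c.spans, protoSpans...)
    return nil
}

// Spans returns a copy of everything exported so far.
func (c *InMemoryClient) Spans() []*tracepb.ResourceSpans {
    c.mu.Lock()
    defer c.mu.Unlock()
    return append([]*tracepb.ResourceSpans(nil), c.spans...)
}
```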
113 vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go generated vendored Normal file
@@ -0,0 +1,113 @@
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"

import (
    "context"
    "errors"
    "sync"

    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
    tracesdk "go.opentelemetry.io/otel/sdk/trace"
)

var (
    errAlreadyStarted = errors.New("already started")
)

// Exporter exports trace data in the OTLP wire format.
type Exporter struct {
    client Client

    mu      sync.RWMutex
    started bool

    startOnce sync.Once
    stopOnce  sync.Once
}

// ExportSpans exports a batch of spans.
func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) error {
    protoSpans := tracetransform.Spans(ss)
    if len(protoSpans) == 0 {
        return nil
    }

    return e.client.UploadTraces(ctx, protoSpans)
}

// Start establishes a connection to the receiving endpoint.
func (e *Exporter) Start(ctx context.Context) error {
    var err = errAlreadyStarted
    e.startOnce.Do(func() {
        e.mu.Lock()
        e.started = true
        e.mu.Unlock()
        err = e.client.Start(ctx)
    })

    return err
}

// Shutdown flushes all exports and closes all connections to the receiving endpoint.
func (e *Exporter) Shutdown(ctx context.Context) error {
    e.mu.RLock()
    started := e.started
    e.mu.RUnlock()

    if !started {
        return nil
    }

    var err error

    e.stopOnce.Do(func() {
        err = e.client.Stop(ctx)
        e.mu.Lock()
        e.started = false
        e.mu.Unlock()
    })

    return err
}

var _ tracesdk.SpanExporter = (*Exporter)(nil)

// New constructs a new Exporter and starts it.
func New(ctx context.Context, client Client) (*Exporter, error) {
    exp := NewUnstarted(client)
    if err := exp.Start(ctx); err != nil {
        return nil, err
    }
    return exp, nil
}

// NewUnstarted constructs a new Exporter and does not start it.
func NewUnstarted(client Client) *Exporter {
    return &Exporter{
        client: client,
    }
}

// MarshalLog is the marshaling function used by the logging system to represent this exporter.
func (e *Exporter) MarshalLog() interface{} {
    return struct {
        Type   string
        Client Client
    }{
        Type:   "otlptrace",
        Client: e.client,
    }
}
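To tie the pieces together, a hedged sketch of registering this exporter with the SDK so spans actually flow through it; the choice of the gRPC client and the batching span processor are assumptions for illustration:

```go
package main

import (
    "context"
    "log"

    "go.opentelemetry.io/otel"
    "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
    "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func main() {
    ctx := context.Background()

    // Any otlptrace.Client works here: gRPC, HTTP, or an in-memory one.
    exp, err := otlptrace.New(ctx, otlptracegrpc.NewClient())
    if err != nil {
        log.Fatalf("creating exporter: %v", err)
    }

    // Batch spans and hand them to the exporter; register globally so
    // otel.Tracer(...) picks this provider up.
    tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
    defer func() { _ = tp.Shutdown(ctx) }()
    otel.SetTracerProvider(tp)
}
```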
Some files were not shown because too many files have changed in this diff.