Merge branch 'cloudflare:master' into master

This commit is contained in:
Areg Vrtanesyan 2026-02-23 15:43:39 +00:00 committed by GitHub
commit 750f49f09a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
218 changed files with 40008 additions and 12215 deletions

View File

@ -8,7 +8,7 @@ type TunnelClient interface {
CreateTunnel(name string, tunnelSecret []byte) (*TunnelWithToken, error) CreateTunnel(name string, tunnelSecret []byte) (*TunnelWithToken, error)
GetTunnel(tunnelID uuid.UUID) (*Tunnel, error) GetTunnel(tunnelID uuid.UUID) (*Tunnel, error)
GetTunnelToken(tunnelID uuid.UUID) (string, error) GetTunnelToken(tunnelID uuid.UUID) (string, error)
GetManagementToken(tunnelID uuid.UUID) (string, error) GetManagementToken(tunnelID uuid.UUID, resource ManagementResource) (string, error)
DeleteTunnel(tunnelID uuid.UUID, cascade bool) error DeleteTunnel(tunnelID uuid.UUID, cascade bool) error
ListTunnels(filter *TunnelFilter) ([]*Tunnel, error) ListTunnels(filter *TunnelFilter) ([]*Tunnel, error)
ListActiveClients(tunnelID uuid.UUID) ([]*ActiveClient, error) ListActiveClients(tunnelID uuid.UUID) ([]*ActiveClient, error)

View File

@ -15,6 +15,21 @@ import (
var ErrTunnelNameConflict = errors.New("tunnel with name already exists") var ErrTunnelNameConflict = errors.New("tunnel with name already exists")
// ManagementResource identifies a resource that a management token can be
// scoped to (currently only tunnel logs).
type ManagementResource int

const (
	// Logs grants access to the tunnel's log stream.
	Logs ManagementResource = iota
)

// String returns the lowercase path segment used for this resource in
// management API endpoints; unknown values map to the empty string.
func (r ManagementResource) String() string {
	if r == Logs {
		return "logs"
	}
	return ""
}
type Tunnel struct { type Tunnel struct {
ID uuid.UUID `json:"id"` ID uuid.UUID `json:"id"`
Name string `json:"name"` Name string `json:"name"`
@ -50,10 +65,6 @@ type newTunnel struct {
TunnelSecret []byte `json:"tunnel_secret"` TunnelSecret []byte `json:"tunnel_secret"`
} }
type managementRequest struct {
Resources []string `json:"resources"`
}
type CleanupParams struct { type CleanupParams struct {
queryParams url.Values queryParams url.Values
} }
@ -137,15 +148,16 @@ func (r *RESTClient) GetTunnelToken(tunnelID uuid.UUID) (token string, err error
return "", r.statusCodeToError("get tunnel token", resp) return "", r.statusCodeToError("get tunnel token", resp)
} }
func (r *RESTClient) GetManagementToken(tunnelID uuid.UUID) (token string, err error) { // managementEndpointPath returns the path segment for a management resource endpoint
// managementEndpointPath returns the path segment for a management resource
// endpoint, e.g. "<tunnel-id>/management/logs".
func managementEndpointPath(tunnelID uuid.UUID, res ManagementResource) string {
	// uuid.UUID formats via its String method, so plain concatenation
	// produces the same result as fmt.Sprintf("%v/management/%s", ...).
	return tunnelID.String() + "/management/" + res.String()
}
func (r *RESTClient) GetManagementToken(tunnelID uuid.UUID, res ManagementResource) (token string, err error) {
endpoint := r.baseEndpoints.accountLevel endpoint := r.baseEndpoints.accountLevel
endpoint.Path = path.Join(endpoint.Path, fmt.Sprintf("%v/management", tunnelID)) endpoint.Path = path.Join(endpoint.Path, managementEndpointPath(tunnelID, res))
body := &managementRequest{ resp, err := r.sendRequest("POST", endpoint, nil)
Resources: []string{"logs"},
}
resp, err := r.sendRequest("POST", endpoint, body)
if err != nil { if err != nil {
return "", errors.Wrap(err, "REST request failed") return "", errors.Wrap(err, "REST request failed")
} }

View File

@ -2,7 +2,6 @@ package cfapi
import ( import (
"bytes" "bytes"
"fmt"
"net" "net"
"reflect" "reflect"
"strings" "strings"
@ -11,6 +10,7 @@ import (
"github.com/google/uuid" "github.com/google/uuid"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
) )
var loc, _ = time.LoadLocation("UTC") var loc, _ = time.LoadLocation("UTC")
@ -52,7 +52,6 @@ func Test_unmarshalTunnel(t *testing.T) {
} }
func TestUnmarshalTunnelOk(t *testing.T) { func TestUnmarshalTunnelOk(t *testing.T) {
jsonBody := `{"success": true, "result": {"id": "00000000-0000-0000-0000-000000000000","name":"test","created_at":"0001-01-01T00:00:00Z","connections":[]}}` jsonBody := `{"success": true, "result": {"id": "00000000-0000-0000-0000-000000000000","name":"test","created_at":"0001-01-01T00:00:00Z","connections":[]}}`
expected := Tunnel{ expected := Tunnel{
ID: uuid.Nil, ID: uuid.Nil,
@ -61,12 +60,11 @@ func TestUnmarshalTunnelOk(t *testing.T) {
Connections: []Connection{}, Connections: []Connection{},
} }
actual, err := unmarshalTunnel(bytes.NewReader([]byte(jsonBody))) actual, err := unmarshalTunnel(bytes.NewReader([]byte(jsonBody)))
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, &expected, actual) require.Equal(t, &expected, actual)
} }
func TestUnmarshalTunnelErr(t *testing.T) { func TestUnmarshalTunnelErr(t *testing.T) {
tests := []string{ tests := []string{
`abc`, `abc`,
`{"success": true, "result": abc}`, `{"success": true, "result": abc}`,
@ -76,7 +74,53 @@ func TestUnmarshalTunnelErr(t *testing.T) {
for i, test := range tests { for i, test := range tests {
_, err := unmarshalTunnel(bytes.NewReader([]byte(test))) _, err := unmarshalTunnel(bytes.NewReader([]byte(test)))
assert.Error(t, err, fmt.Sprintf("Test #%v failed", i)) assert.Error(t, err, "Test #%v failed", i)
}
}
// TestManagementResource_String verifies the endpoint name produced for each
// known management resource.
func TestManagementResource_String(t *testing.T) {
	cases := []struct {
		name     string
		resource ManagementResource
		want     string
	}{
		{
			name:     "Logs",
			resource: Logs,
			want:     "logs",
		},
	}
	for _, tc := range cases {
		t.Run(tc.name, func(t *testing.T) {
			got := tc.resource.String()
			assert.Equal(t, tc.want, got)
		})
	}
}
// TestManagementResource_String_Unknown verifies that an unrecognized
// resource value stringifies to the empty string.
func TestManagementResource_String_Unknown(t *testing.T) {
	got := ManagementResource(999).String()
	assert.Equal(t, "", got)
}
func TestManagementEndpointPath(t *testing.T) {
tunnelID := uuid.MustParse("b34cc7ce-925b-46ee-bc23-4cb5c18d8292")
tests := []struct {
name string
resource ManagementResource
want string
}{
{
name: "Logs resource",
resource: Logs,
want: "b34cc7ce-925b-46ee-bc23-4cb5c18d8292/management/logs",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := managementEndpointPath(tunnelID, tt.resource)
assert.Equal(t, tt.want, got)
})
} }
} }
@ -97,6 +141,6 @@ func TestUnmarshalConnections(t *testing.T) {
}}, }},
} }
actual, err := parseConnectionsDetails(bytes.NewReader([]byte(jsonBody))) actual, err := parseConnectionsDetails(bytes.NewReader([]byte(jsonBody)))
assert.NoError(t, err) require.NoError(t, err)
assert.Equal(t, []*ActiveClient{&expected}, actual) assert.Equal(t, []*ActiveClient{&expected}, actual)
} }

View File

@ -10,7 +10,7 @@ import (
"github.com/cloudflare/cloudflared/logger" "github.com/cloudflare/cloudflared/logger"
) )
const removedMessage = "dns-proxy feature is not supported, since version 2026.2.0" const removedMessage = "dns-proxy feature is no longer supported"
func Command() *cli.Command { func Command() *cli.Command {
return &cli.Command{ return &cli.Command{
@ -24,7 +24,7 @@ func Command() *cli.Command {
func Run(c *cli.Context) error { func Run(c *cli.Context) error {
log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog) log := logger.CreateLoggerFromContext(c, logger.EnableTerminalLog)
err := errors.New(removedMessage) err := errors.New(removedMessage)
log.Err(err).Msg("DNS Proxy is no longer supported") log.Error().Msg("DNS Proxy is no longer supported since version 2026.2.0 (https://developers.cloudflare.com/changelog/2025-11-11-cloudflared-proxy-dns/). As an alternative consider using https://developers.cloudflare.com/1.1.1.1/encryption/dns-over-https/dns-over-https-client/")
return err return err
} }

View File

@ -18,6 +18,8 @@ import (
"github.com/urfave/cli/v2" "github.com/urfave/cli/v2"
"nhooyr.io/websocket" "nhooyr.io/websocket"
"github.com/cloudflare/cloudflared/cfapi"
"github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil" "github.com/cloudflare/cloudflared/cmd/cloudflared/cliutil"
cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags" cfdflags "github.com/cloudflare/cloudflared/cmd/cloudflared/flags"
"github.com/cloudflare/cloudflared/credentials" "github.com/cloudflare/cloudflared/credentials"
@ -52,7 +54,7 @@ func buildTailManagementTokenSubcommand() *cli.Command {
func managementTokenCommand(c *cli.Context) error { func managementTokenCommand(c *cli.Context) error {
log := createLogger(c) log := createLogger(c)
token, err := getManagementToken(c, log) token, err := getManagementToken(c, log, cfapi.Logs)
if err != nil { if err != nil {
return err return err
} }
@ -231,7 +233,7 @@ func parseFilters(c *cli.Context) (*management.StreamingFilters, error) {
} }
// getManagementToken will make a call to the Cloudflare API to acquire a management token for the requested tunnel. // getManagementToken will make a call to the Cloudflare API to acquire a management token for the requested tunnel.
func getManagementToken(c *cli.Context, log *zerolog.Logger) (string, error) { func getManagementToken(c *cli.Context, log *zerolog.Logger, res cfapi.ManagementResource) (string, error) {
userCreds, err := credentials.Read(c.String(cfdflags.OriginCert), log) userCreds, err := credentials.Read(c.String(cfdflags.OriginCert), log)
if err != nil { if err != nil {
return "", err return "", err
@ -258,7 +260,7 @@ func getManagementToken(c *cli.Context, log *zerolog.Logger) (string, error) {
return "", errors.New("unable to parse provided tunnel id as a valid UUID") return "", errors.New("unable to parse provided tunnel id as a valid UUID")
} }
token, err := client.GetManagementToken(tunnelID) token, err := client.GetManagementToken(tunnelID, res)
if err != nil { if err != nil {
return "", err return "", err
} }
@ -267,12 +269,12 @@ func getManagementToken(c *cli.Context, log *zerolog.Logger) (string, error) {
} }
// buildURL will build the management url to contain the required query parameters to authenticate the request. // buildURL will build the management url to contain the required query parameters to authenticate the request.
func buildURL(c *cli.Context, log *zerolog.Logger) (url.URL, error) { func buildURL(c *cli.Context, log *zerolog.Logger, res cfapi.ManagementResource) (url.URL, error) {
var err error var err error
token := c.String("token") token := c.String("token")
if token == "" { if token == "" {
token, err = getManagementToken(c, log) token, err = getManagementToken(c, log, res)
if err != nil { if err != nil {
return url.URL{}, fmt.Errorf("unable to acquire management token for requested tunnel id: %w", err) return url.URL{}, fmt.Errorf("unable to acquire management token for requested tunnel id: %w", err)
} }
@ -345,7 +347,7 @@ func Run(c *cli.Context) error {
return nil return nil
} }
u, err := buildURL(c, log) u, err := buildURL(c, log, cfapi.Logs)
if err != nil { if err != nil {
log.Err(err).Msg("unable to construct management request URL") log.Err(err).Msg("unable to construct management request URL")
return nil return nil

17
go.mod
View File

@ -1,6 +1,6 @@
module github.com/cloudflare/cloudflared module github.com/cloudflare/cloudflared
go 1.24 go 1.24.0
require ( require (
github.com/coreos/go-oidc/v3 v3.10.0 github.com/coreos/go-oidc/v3 v3.10.0
@ -24,20 +24,20 @@ require (
github.com/prometheus/client_model v0.6.2 github.com/prometheus/client_model v0.6.2
github.com/quic-go/quic-go v0.52.0 github.com/quic-go/quic-go v0.52.0
github.com/rs/zerolog v1.20.0 github.com/rs/zerolog v1.20.0
github.com/stretchr/testify v1.10.0 github.com/stretchr/testify v1.11.1
github.com/urfave/cli/v2 v2.3.0 github.com/urfave/cli/v2 v2.3.0
go.opentelemetry.io/contrib/propagators v0.22.0 go.opentelemetry.io/contrib/propagators v0.22.0
go.opentelemetry.io/otel v1.35.0 go.opentelemetry.io/otel v1.40.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0
go.opentelemetry.io/otel/sdk v1.35.0 go.opentelemetry.io/otel/sdk v1.40.0
go.opentelemetry.io/otel/trace v1.35.0 go.opentelemetry.io/otel/trace v1.40.0
go.opentelemetry.io/proto/otlp v1.2.0 go.opentelemetry.io/proto/otlp v1.2.0
go.uber.org/automaxprocs v1.6.0 go.uber.org/automaxprocs v1.6.0
go.uber.org/mock v0.5.1 go.uber.org/mock v0.5.1
golang.org/x/crypto v0.38.0 golang.org/x/crypto v0.38.0
golang.org/x/net v0.40.0 golang.org/x/net v0.40.0
golang.org/x/sync v0.14.0 golang.org/x/sync v0.14.0
golang.org/x/sys v0.33.0 golang.org/x/sys v0.40.0
golang.org/x/term v0.32.0 golang.org/x/term v0.32.0
google.golang.org/protobuf v1.36.6 google.golang.org/protobuf v1.36.6
gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0
@ -79,9 +79,8 @@ require (
github.com/prometheus/procfs v0.15.1 // indirect github.com/prometheus/procfs v0.15.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/tinylib/msgp v1.6.3 // indirect github.com/tinylib/msgp v1.6.3 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/otel/metric v1.35.0 // indirect go.opentelemetry.io/otel/metric v1.40.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.35.0 // indirect
golang.org/x/arch v0.4.0 // indirect golang.org/x/arch v0.4.0 // indirect
golang.org/x/mod v0.24.0 // indirect golang.org/x/mod v0.24.0 // indirect
golang.org/x/oauth2 v0.30.0 // indirect golang.org/x/oauth2 v0.30.0 // indirect

36
go.sum
View File

@ -168,8 +168,8 @@ github.com/prometheus/common v0.64.0 h1:pdZeA+g617P7oGv1CzdTzyeShxAGrTBsolKNOLQP
github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8= github.com/prometheus/common v0.64.0/go.mod h1:0gZns+BLRQ3V6NdaerOhMbwwRbNh9hkGINtQAsP5GS8=
github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc= github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0learggepc=
github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk= github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.20.0 h1:38k9hgtUBdxFwE34yS8rTHmHBa4eN16E4DJlv177LNs= github.com/rs/zerolog v1.20.0 h1:38k9hgtUBdxFwE34yS8rTHmHBa4eN16E4DJlv177LNs=
github.com/rs/zerolog v1.20.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo= github.com/rs/zerolog v1.20.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo=
@ -186,8 +186,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/tinylib/msgp v1.6.3 h1:bCSxiTz386UTgyT1i0MSCvdbWjVW+8sG3PjkGsZQt4s= github.com/tinylib/msgp v1.6.3 h1:bCSxiTz386UTgyT1i0MSCvdbWjVW+8sG3PjkGsZQt4s=
github.com/tinylib/msgp v1.6.3/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA= github.com/tinylib/msgp v1.6.3/go.mod h1:RSp0LW9oSxFut3KzESt5Voq4GVWyS+PSulT77roAqEA=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI= github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
@ -197,24 +197,24 @@ github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVM
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU= github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg= github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.2.1 h1:jXsnJ4Lmnqd11kwkBV2LgLoFMZKizbCi5fNZ/ipaZ64=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= go.opentelemetry.io/auto/sdk v1.2.1/go.mod h1:KRTj+aOaElaLi+wW1kO/DZRXwkF4C5xPbEe3ZiIhN7Y=
go.opentelemetry.io/contrib/propagators v0.22.0 h1:KGdv58M2//veiYLIhb31mofaI2LgkIPXXAZVeYVyfd8= go.opentelemetry.io/contrib/propagators v0.22.0 h1:KGdv58M2//veiYLIhb31mofaI2LgkIPXXAZVeYVyfd8=
go.opentelemetry.io/contrib/propagators v0.22.0/go.mod h1:xGOuXr6lLIF9BXipA4pm6UuOSI0M98U6tsI3khbOiwU= go.opentelemetry.io/contrib/propagators v0.22.0/go.mod h1:xGOuXr6lLIF9BXipA4pm6UuOSI0M98U6tsI3khbOiwU=
go.opentelemetry.io/otel v1.0.0-RC2/go.mod h1:w1thVQ7qbAy8MHb0IFj8a5Q2QU0l2ksf8u/CN8m3NOM= go.opentelemetry.io/otel v1.0.0-RC2/go.mod h1:w1thVQ7qbAy8MHb0IFj8a5Q2QU0l2ksf8u/CN8m3NOM=
go.opentelemetry.io/otel v1.35.0 h1:xKWKPxrxB6OtMCbmMY021CqC45J+3Onta9MqjhnusiQ= go.opentelemetry.io/otel v1.40.0 h1:oA5YeOcpRTXq6NN7frwmwFR0Cn3RhTVZvXsP4duvCms=
go.opentelemetry.io/otel v1.35.0/go.mod h1:UEqy8Zp11hpkUrL73gSlELM0DupHoiq72dR+Zqel/+Y= go.opentelemetry.io/otel v1.40.0/go.mod h1:IMb+uXZUKkMXdPddhwAHm6UfOwJyh4ct1ybIlV14J0g=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs=
go.opentelemetry.io/otel/metric v1.35.0 h1:0znxYu2SNyuMSQT4Y9WDWej0VpcsxkuklLa4/siN90M= go.opentelemetry.io/otel/metric v1.40.0 h1:rcZe317KPftE2rstWIBitCdVp89A2HqjkxR3c11+p9g=
go.opentelemetry.io/otel/metric v1.35.0/go.mod h1:nKVFgxBZ2fReX6IlyW28MgZojkoAkJGaE8CpgeAU3oE= go.opentelemetry.io/otel/metric v1.40.0/go.mod h1:ib/crwQH7N3r5kfiBZQbwrTge743UDc7DTFVZrrXnqc=
go.opentelemetry.io/otel/sdk v1.35.0 h1:iPctf8iprVySXSKJffSS79eOjl9pvxV9ZqOWT0QejKY= go.opentelemetry.io/otel/sdk v1.40.0 h1:KHW/jUzgo6wsPh9At46+h4upjtccTmuZCFAc9OJ71f8=
go.opentelemetry.io/otel/sdk v1.35.0/go.mod h1:+ga1bZliga3DxJ3CQGg3updiaAJoNECOgJREo9KHGQg= go.opentelemetry.io/otel/sdk v1.40.0/go.mod h1:Ph7EFdYvxq72Y8Li9q8KebuYUr2KoeyHx0DRMKrYBUE=
go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= go.opentelemetry.io/otel/sdk/metric v1.40.0 h1:mtmdVqgQkeRxHgRv4qhyJduP3fYJRMX4AtAlbuWdCYw=
go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= go.opentelemetry.io/otel/sdk/metric v1.40.0/go.mod h1:4Z2bGMf0KSK3uRjlczMOeMhKU2rhUqdWNoKcYrtcBPg=
go.opentelemetry.io/otel/trace v1.0.0-RC2/go.mod h1:JPQ+z6nNw9mqEGT8o3eoPTdnNI+Aj5JcxEsVGREIAy4= go.opentelemetry.io/otel/trace v1.0.0-RC2/go.mod h1:JPQ+z6nNw9mqEGT8o3eoPTdnNI+Aj5JcxEsVGREIAy4=
go.opentelemetry.io/otel/trace v1.35.0 h1:dPpEfJu1sDIqruz7BHFG3c7528f6ddfSWfFDVt/xgMs= go.opentelemetry.io/otel/trace v1.40.0 h1:WA4etStDttCSYuhwvEa8OP8I5EWu24lkOzp+ZYblVjw=
go.opentelemetry.io/otel/trace v1.35.0/go.mod h1:WUk7DtFp1Aw2MkvqGdwiXYDZZNvA/1J8o6xRXLrIkyc= go.opentelemetry.io/otel/trace v1.40.0/go.mod h1:zeAhriXecNGP/s2SEG3+Y8X9ujcJOTqQ5RgdEJcawiA=
go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94= go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A= go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs= go.uber.org/automaxprocs v1.6.0 h1:O3y2/QNTOdbF+e/dpXNNW7Rx2hZ4sTIPyybbxyNqTUs=
@ -249,8 +249,8 @@ golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw= golang.org/x/sys v0.40.0 h1:DBZZqJ2Rkml6QMQsZywtnjnnGvHza6BTfYFWY9kjEWQ=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/sys v0.40.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg= golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ= golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

View File

@ -390,7 +390,8 @@ func Greater(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, "\"%v\" is not greater than \"%v\"", msgAndArgs...) failMessage := fmt.Sprintf("\"%v\" is not greater than \"%v\"", e1, e2)
return compareTwoValues(t, e1, e2, []compareResult{compareGreater}, failMessage, msgAndArgs...)
} }
// GreaterOrEqual asserts that the first element is greater than or equal to the second // GreaterOrEqual asserts that the first element is greater than or equal to the second
@ -403,7 +404,8 @@ func GreaterOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...in
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, "\"%v\" is not greater than or equal to \"%v\"", msgAndArgs...) failMessage := fmt.Sprintf("\"%v\" is not greater than or equal to \"%v\"", e1, e2)
return compareTwoValues(t, e1, e2, []compareResult{compareGreater, compareEqual}, failMessage, msgAndArgs...)
} }
// Less asserts that the first element is less than the second // Less asserts that the first element is less than the second
@ -415,7 +417,8 @@ func Less(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...interface{})
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
return compareTwoValues(t, e1, e2, []compareResult{compareLess}, "\"%v\" is not less than \"%v\"", msgAndArgs...) failMessage := fmt.Sprintf("\"%v\" is not less than \"%v\"", e1, e2)
return compareTwoValues(t, e1, e2, []compareResult{compareLess}, failMessage, msgAndArgs...)
} }
// LessOrEqual asserts that the first element is less than or equal to the second // LessOrEqual asserts that the first element is less than or equal to the second
@ -428,7 +431,8 @@ func LessOrEqual(t TestingT, e1 interface{}, e2 interface{}, msgAndArgs ...inter
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, "\"%v\" is not less than or equal to \"%v\"", msgAndArgs...) failMessage := fmt.Sprintf("\"%v\" is not less than or equal to \"%v\"", e1, e2)
return compareTwoValues(t, e1, e2, []compareResult{compareLess, compareEqual}, failMessage, msgAndArgs...)
} }
// Positive asserts that the specified element is positive // Positive asserts that the specified element is positive
@ -440,7 +444,8 @@ func Positive(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
h.Helper() h.Helper()
} }
zero := reflect.Zero(reflect.TypeOf(e)) zero := reflect.Zero(reflect.TypeOf(e))
return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, "\"%v\" is not positive", msgAndArgs...) failMessage := fmt.Sprintf("\"%v\" is not positive", e)
return compareTwoValues(t, e, zero.Interface(), []compareResult{compareGreater}, failMessage, msgAndArgs...)
} }
// Negative asserts that the specified element is negative // Negative asserts that the specified element is negative
@ -452,7 +457,8 @@ func Negative(t TestingT, e interface{}, msgAndArgs ...interface{}) bool {
h.Helper() h.Helper()
} }
zero := reflect.Zero(reflect.TypeOf(e)) zero := reflect.Zero(reflect.TypeOf(e))
return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, "\"%v\" is not negative", msgAndArgs...) failMessage := fmt.Sprintf("\"%v\" is not negative", e)
return compareTwoValues(t, e, zero.Interface(), []compareResult{compareLess}, failMessage, msgAndArgs...)
} }
func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool { func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedComparesResults []compareResult, failMessage string, msgAndArgs ...interface{}) bool {
@ -468,11 +474,11 @@ func compareTwoValues(t TestingT, e1 interface{}, e2 interface{}, allowedCompare
compareResult, isComparable := compare(e1, e2, e1Kind) compareResult, isComparable := compare(e1, e2, e1Kind)
if !isComparable { if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\"", reflect.TypeOf(e1)), msgAndArgs...) return Fail(t, fmt.Sprintf(`Can not compare type "%T"`, e1), msgAndArgs...)
} }
if !containsValue(allowedComparesResults, compareResult) { if !containsValue(allowedComparesResults, compareResult) {
return Fail(t, fmt.Sprintf(failMessage, e1, e2), msgAndArgs...) return Fail(t, failMessage, msgAndArgs...)
} }
return true return true

View File

@ -50,10 +50,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string
return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) return ElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
} }
// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Emptyf asserts that the given value is "empty".
// a slice or a channel with len == 0. //
// [Zero values] are "empty".
//
// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
//
// Slices, maps and channels with zero length are "empty".
//
// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
// //
// assert.Emptyf(t, obj, "error message %s", "formatted") // assert.Emptyf(t, obj, "error message %s", "formatted")
//
// [Zero values]: https://go.dev/ref/spec#The_zero_value
func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool { func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -117,10 +126,8 @@ func EqualValuesf(t TestingT, expected interface{}, actual interface{}, msg stri
// Errorf asserts that a function returned an error (i.e. not `nil`). // Errorf asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// if assert.Errorf(t, err, "error message %s", "formatted") { // assert.Errorf(t, err, "error message %s", "formatted")
// assert.Equal(t, expectedErrorf, err)
// }
func Errorf(t TestingT, err error, msg string, args ...interface{}) bool { func Errorf(t TestingT, err error, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -438,7 +445,19 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf
return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...) return IsNonIncreasing(t, object, append([]interface{}{msg}, args...)...)
} }
// IsNotTypef asserts that the specified objects are not of the same type.
//
// assert.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return IsNotType(t, theType, object, append([]interface{}{msg}, args...)...)
}
// IsTypef asserts that the specified objects are of the same type. // IsTypef asserts that the specified objects are of the same type.
//
// assert.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted")
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -585,8 +604,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str
return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...) return NotElementsMatch(t, listA, listB, append([]interface{}{msg}, args...)...)
} }
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmptyf asserts that the specified object is NOT [Empty].
// a slice or a channel with len == 0.
// //
// if assert.NotEmptyf(t, obj, "error message %s", "formatted") { // if assert.NotEmptyf(t, obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1]) // assert.Equal(t, "two", obj[1])
@ -693,12 +711,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
} }
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT // NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
// contain all elements given in the specified subset list(array, slice...) or // elements given in the subset (array, slice, or map).
// map. // Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") // assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
// assert.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
// assert.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -782,11 +803,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
return Same(t, expected, actual, append([]interface{}{msg}, args...)...) return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
} }
// Subsetf asserts that the specified list(array, slice...) or map contains all // Subsetf asserts that the list (array, slice, or map) contains all elements
// elements given in the specified subset list(array, slice...) or map. // given in the subset (array, slice, or map).
// Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") // assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
// assert.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
// assert.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()

View File

@ -92,10 +92,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st
return ElementsMatchf(a.t, listA, listB, msg, args...) return ElementsMatchf(a.t, listA, listB, msg, args...)
} }
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Empty asserts that the given value is "empty".
// a slice or a channel with len == 0. //
// [Zero values] are "empty".
//
// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
//
// Slices, maps and channels with zero length are "empty".
//
// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
// //
// a.Empty(obj) // a.Empty(obj)
//
// [Zero values]: https://go.dev/ref/spec#The_zero_value
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool { func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -103,10 +112,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) bool {
return Empty(a.t, object, msgAndArgs...) return Empty(a.t, object, msgAndArgs...)
} }
// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Emptyf asserts that the given value is "empty".
// a slice or a channel with len == 0. //
// [Zero values] are "empty".
//
// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
//
// Slices, maps and channels with zero length are "empty".
//
// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
// //
// a.Emptyf(obj, "error message %s", "formatted") // a.Emptyf(obj, "error message %s", "formatted")
//
// [Zero values]: https://go.dev/ref/spec#The_zero_value
func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool { func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -224,10 +242,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string
// Error asserts that a function returned an error (i.e. not `nil`). // Error asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// if a.Error(err) { // a.Error(err)
// assert.Equal(t, expectedError, err)
// }
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool { func (a *Assertions) Error(err error, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -297,10 +313,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter
// Errorf asserts that a function returned an error (i.e. not `nil`). // Errorf asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// if a.Errorf(err, "error message %s", "formatted") { // a.Errorf(err, "error message %s", "formatted")
// assert.Equal(t, expectedErrorf, err)
// }
func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool { func (a *Assertions) Errorf(err error, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -868,7 +882,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in
return IsNonIncreasingf(a.t, object, msg, args...) return IsNonIncreasingf(a.t, object, msg, args...)
} }
// IsNotType asserts that the specified objects are not of the same type.
//
// a.IsNotType(&NotMyStruct{}, &MyStruct{})
func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsNotType(a.t, theType, object, msgAndArgs...)
}
// IsNotTypef asserts that the specified objects are not of the same type.
//
// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return IsNotTypef(a.t, theType, object, msg, args...)
}
// IsType asserts that the specified objects are of the same type. // IsType asserts that the specified objects are of the same type.
//
// a.IsType(&MyStruct{}, &MyStruct{})
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -877,6 +913,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd
} }
// IsTypef asserts that the specified objects are of the same type. // IsTypef asserts that the specified objects are of the same type.
//
// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted")
func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool { func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1162,8 +1200,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg
return NotElementsMatchf(a.t, listA, listB, msg, args...) return NotElementsMatchf(a.t, listA, listB, msg, args...)
} }
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmpty asserts that the specified object is NOT [Empty].
// a slice or a channel with len == 0.
// //
// if a.NotEmpty(obj) { // if a.NotEmpty(obj) {
// assert.Equal(t, "two", obj[1]) // assert.Equal(t, "two", obj[1])
@ -1175,8 +1212,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) boo
return NotEmpty(a.t, object, msgAndArgs...) return NotEmpty(a.t, object, msgAndArgs...)
} }
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmptyf asserts that the specified object is NOT [Empty].
// a slice or a channel with len == 0.
// //
// if a.NotEmptyf(obj, "error message %s", "formatted") { // if a.NotEmptyf(obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1]) // assert.Equal(t, "two", obj[1])
@ -1378,12 +1414,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri
return NotSamef(a.t, expected, actual, msg, args...) return NotSamef(a.t, expected, actual, msg, args...)
} }
// NotSubset asserts that the specified list(array, slice...) or map does NOT // NotSubset asserts that the list (array, slice, or map) does NOT contain all
// contain all elements given in the specified subset list(array, slice...) or // elements given in the subset (array, slice, or map).
// map. // Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset([1, 3, 4], [1, 2])
// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) // a.NotSubset({"x": 1, "y": 2}, {"z": 3})
// a.NotSubset([1, 3, 4], {1: "one", 2: "two"})
// a.NotSubset({"x": 1, "y": 2}, ["z"])
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1391,12 +1430,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs
return NotSubset(a.t, list, subset, msgAndArgs...) return NotSubset(a.t, list, subset, msgAndArgs...)
} }
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT // NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
// contain all elements given in the specified subset list(array, slice...) or // elements given in the subset (array, slice, or map).
// map. // Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1556,11 +1598,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
return Samef(a.t, expected, actual, msg, args...) return Samef(a.t, expected, actual, msg, args...)
} }
// Subset asserts that the specified list(array, slice...) or map contains all // Subset asserts that the list (array, slice, or map) contains all elements
// elements given in the specified subset list(array, slice...) or map. // given in the subset (array, slice, or map).
// Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// a.Subset([1, 2, 3], [1, 2]) // a.Subset([1, 2, 3], [1, 2])
// a.Subset({"x": 1, "y": 2}, {"x": 1}) // a.Subset({"x": 1, "y": 2}, {"x": 1})
// a.Subset([1, 2, 3], {1: "one", 2: "two"})
// a.Subset({"x": 1, "y": 2}, ["x"])
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1568,11 +1614,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
return Subset(a.t, list, subset, msgAndArgs...) return Subset(a.t, list, subset, msgAndArgs...)
} }
// Subsetf asserts that the specified list(array, slice...) or map contains all // Subsetf asserts that the list (array, slice, or map) contains all elements
// elements given in the specified subset list(array, slice...) or map. // given in the subset (array, slice, or map).
// Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()

View File

@ -33,7 +33,7 @@ func isOrdered(t TestingT, object interface{}, allowedComparesResults []compareR
compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind) compareResult, isComparable := compare(prevValueInterface, valueInterface, firstValueKind)
if !isComparable { if !isComparable {
return Fail(t, fmt.Sprintf("Can not compare type \"%s\" and \"%s\"", reflect.TypeOf(value), reflect.TypeOf(prevValue)), msgAndArgs...) return Fail(t, fmt.Sprintf(`Can not compare type "%T" and "%T"`, value, prevValue), msgAndArgs...)
} }
if !containsValue(allowedComparesResults, compareResult) { if !containsValue(allowedComparesResults, compareResult) {

View File

@ -210,59 +210,77 @@ the problem actually occurred in calling code.*/
// of each stack frame leading from the current test to the assert call that // of each stack frame leading from the current test to the assert call that
// failed. // failed.
func CallerInfo() []string { func CallerInfo() []string {
var pc uintptr var pc uintptr
var ok bool
var file string var file string
var line int var line int
var name string var name string
const stackFrameBufferSize = 10
pcs := make([]uintptr, stackFrameBufferSize)
callers := []string{} callers := []string{}
for i := 0; ; i++ { offset := 1
pc, file, line, ok = runtime.Caller(i)
if !ok { for {
// The breaks below failed to terminate the loop, and we ran off the n := runtime.Callers(offset, pcs)
// end of the call stack.
if n == 0 {
break break
} }
// This is a huge edge case, but it will panic if this is the case, see #180 frames := runtime.CallersFrames(pcs[:n])
if file == "<autogenerated>" {
break
}
f := runtime.FuncForPC(pc) for {
if f == nil { frame, more := frames.Next()
break pc = frame.PC
} file = frame.File
name = f.Name() line = frame.Line
// testing.tRunner is the standard library function that calls // This is a huge edge case, but it will panic if this is the case, see #180
// tests. Subtests are called directly by tRunner, without going through if file == "<autogenerated>" {
// the Test/Benchmark/Example function that contains the t.Run calls, so break
// with subtests we should break when we hit tRunner, without adding it }
// to the list of callers.
if name == "testing.tRunner" {
break
}
parts := strings.Split(file, "/") f := runtime.FuncForPC(pc)
if len(parts) > 1 { if f == nil {
filename := parts[len(parts)-1] break
dir := parts[len(parts)-2] }
if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" { name = f.Name()
callers = append(callers, fmt.Sprintf("%s:%d", file, line))
// testing.tRunner is the standard library function that calls
// tests. Subtests are called directly by tRunner, without going through
// the Test/Benchmark/Example function that contains the t.Run calls, so
// with subtests we should break when we hit tRunner, without adding it
// to the list of callers.
if name == "testing.tRunner" {
break
}
parts := strings.Split(file, "/")
if len(parts) > 1 {
filename := parts[len(parts)-1]
dir := parts[len(parts)-2]
if (dir != "assert" && dir != "mock" && dir != "require") || filename == "mock_test.go" {
callers = append(callers, fmt.Sprintf("%s:%d", file, line))
}
}
// Drop the package
dotPos := strings.LastIndexByte(name, '.')
name = name[dotPos+1:]
if isTest(name, "Test") ||
isTest(name, "Benchmark") ||
isTest(name, "Example") {
break
}
if !more {
break
} }
} }
// Drop the package // Next batch
segments := strings.Split(name, ".") offset += cap(pcs)
name = segments[len(segments)-1]
if isTest(name, "Test") ||
isTest(name, "Benchmark") ||
isTest(name, "Example") {
break
}
} }
return callers return callers
@ -437,17 +455,34 @@ func NotImplements(t TestingT, interfaceObject interface{}, object interface{},
return true return true
} }
func isType(expectedType, object interface{}) bool {
return ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType))
}
// IsType asserts that the specified objects are of the same type. // IsType asserts that the specified objects are of the same type.
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { //
// assert.IsType(t, &MyStruct{}, &MyStruct{})
func IsType(t TestingT, expectedType, object interface{}, msgAndArgs ...interface{}) bool {
if isType(expectedType, object) {
return true
}
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
return Fail(t, fmt.Sprintf("Object expected to be of type %T, but was %T", expectedType, object), msgAndArgs...)
}
if !ObjectsAreEqual(reflect.TypeOf(object), reflect.TypeOf(expectedType)) { // IsNotType asserts that the specified objects are not of the same type.
return Fail(t, fmt.Sprintf("Object expected to be of type %v, but was %v", reflect.TypeOf(expectedType), reflect.TypeOf(object)), msgAndArgs...) //
// assert.IsNotType(t, &NotMyStruct{}, &MyStruct{})
func IsNotType(t TestingT, theType, object interface{}, msgAndArgs ...interface{}) bool {
if !isType(theType, object) {
return true
} }
if h, ok := t.(tHelper); ok {
return true h.Helper()
}
return Fail(t, fmt.Sprintf("Object type expected to be different than %T", theType), msgAndArgs...)
} }
// Equal asserts that two objects are equal. // Equal asserts that two objects are equal.
@ -475,7 +510,6 @@ func Equal(t TestingT, expected, actual interface{}, msgAndArgs ...interface{})
} }
return true return true
} }
// validateEqualArgs checks whether provided arguments can be safely used in the // validateEqualArgs checks whether provided arguments can be safely used in the
@ -510,8 +544,9 @@ func Same(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}) b
if !same { if !same {
// both are pointers but not the same type & pointing to the same address // both are pointers but not the same type & pointing to the same address
return Fail(t, fmt.Sprintf("Not same: \n"+ return Fail(t, fmt.Sprintf("Not same: \n"+
"expected: %p %#v\n"+ "expected: %p %#[1]v\n"+
"actual : %p %#v", expected, expected, actual, actual), msgAndArgs...) "actual : %p %#[2]v",
expected, actual), msgAndArgs...)
} }
return true return true
@ -530,14 +565,14 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
same, ok := samePointers(expected, actual) same, ok := samePointers(expected, actual)
if !ok { if !ok {
//fails when the arguments are not pointers // fails when the arguments are not pointers
return !(Fail(t, "Both arguments must be pointers", msgAndArgs...)) return !(Fail(t, "Both arguments must be pointers", msgAndArgs...))
} }
if same { if same {
return Fail(t, fmt.Sprintf( return Fail(t, fmt.Sprintf(
"Expected and actual point to the same object: %p %#v", "Expected and actual point to the same object: %p %#[1]v",
expected, expected), msgAndArgs...) expected), msgAndArgs...)
} }
return true return true
} }
@ -549,7 +584,7 @@ func NotSame(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
func samePointers(first, second interface{}) (same bool, ok bool) { func samePointers(first, second interface{}) (same bool, ok bool) {
firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second) firstPtr, secondPtr := reflect.ValueOf(first), reflect.ValueOf(second)
if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr { if firstPtr.Kind() != reflect.Ptr || secondPtr.Kind() != reflect.Ptr {
return false, false //not both are pointers return false, false // not both are pointers
} }
firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second) firstType, secondType := reflect.TypeOf(first), reflect.TypeOf(second)
@ -610,7 +645,6 @@ func EqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...interfa
} }
return true return true
} }
// EqualExportedValues asserts that the types of two objects are equal and their public // EqualExportedValues asserts that the types of two objects are equal and their public
@ -665,7 +699,6 @@ func Exactly(t TestingT, expected, actual interface{}, msgAndArgs ...interface{}
} }
return Equal(t, expected, actual, msgAndArgs...) return Equal(t, expected, actual, msgAndArgs...)
} }
// NotNil asserts that the specified object is not nil. // NotNil asserts that the specified object is not nil.
@ -715,37 +748,45 @@ func Nil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
// isEmpty gets whether the specified object is considered empty or not. // isEmpty gets whether the specified object is considered empty or not.
func isEmpty(object interface{}) bool { func isEmpty(object interface{}) bool {
// get nil case out of the way // get nil case out of the way
if object == nil { if object == nil {
return true return true
} }
objValue := reflect.ValueOf(object) return isEmptyValue(reflect.ValueOf(object))
switch objValue.Kind() {
// collection types are empty when they have no element
case reflect.Chan, reflect.Map, reflect.Slice:
return objValue.Len() == 0
// pointers are empty if nil or if the value they point to is empty
case reflect.Ptr:
if objValue.IsNil() {
return true
}
deref := objValue.Elem().Interface()
return isEmpty(deref)
// for all other types, compare against the zero value
// array types are empty when they match their zero-initialized state
default:
zero := reflect.Zero(objValue.Type())
return reflect.DeepEqual(object, zero.Interface())
}
} }
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // isEmptyValue gets whether the specified reflect.Value is considered empty or not.
// a slice or a channel with len == 0. func isEmptyValue(objValue reflect.Value) bool {
if objValue.IsZero() {
return true
}
// Special cases of non-zero values that we consider empty
switch objValue.Kind() {
// collection types are empty when they have no element
// Note: array types are empty when they match their zero-initialized state.
case reflect.Chan, reflect.Map, reflect.Slice:
return objValue.Len() == 0
// non-nil pointers are empty if the value they point to is empty
case reflect.Ptr:
return isEmptyValue(objValue.Elem())
}
return false
}
// Empty asserts that the given value is "empty".
//
// [Zero values] are "empty".
//
// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
//
// Slices, maps and channels with zero length are "empty".
//
// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
// //
// assert.Empty(t, obj) // assert.Empty(t, obj)
//
// [Zero values]: https://go.dev/ref/spec#The_zero_value
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool { func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
pass := isEmpty(object) pass := isEmpty(object)
if !pass { if !pass {
@ -756,11 +797,9 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
} }
return pass return pass
} }
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmpty asserts that the specified object is NOT [Empty].
// a slice or a channel with len == 0.
// //
// if assert.NotEmpty(t, obj) { // if assert.NotEmpty(t, obj) {
// assert.Equal(t, "two", obj[1]) // assert.Equal(t, "two", obj[1])
@ -775,7 +814,6 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
} }
return pass return pass
} }
// getLen tries to get the length of an object. // getLen tries to get the length of an object.
@ -819,7 +857,6 @@ func True(t TestingT, value bool, msgAndArgs ...interface{}) bool {
} }
return true return true
} }
// False asserts that the specified value is false. // False asserts that the specified value is false.
@ -834,7 +871,6 @@ func False(t TestingT, value bool, msgAndArgs ...interface{}) bool {
} }
return true return true
} }
// NotEqual asserts that the specified values are NOT equal. // NotEqual asserts that the specified values are NOT equal.
@ -857,7 +893,6 @@ func NotEqual(t TestingT, expected, actual interface{}, msgAndArgs ...interface{
} }
return true return true
} }
// NotEqualValues asserts that two objects are not equal even when converted to the same type // NotEqualValues asserts that two objects are not equal even when converted to the same type
@ -880,7 +915,6 @@ func NotEqualValues(t TestingT, expected, actual interface{}, msgAndArgs ...inte
// return (true, false) if element was not found. // return (true, false) if element was not found.
// return (true, true) if element was found. // return (true, true) if element was found.
func containsElement(list interface{}, element interface{}) (ok, found bool) { func containsElement(list interface{}, element interface{}) (ok, found bool) {
listValue := reflect.ValueOf(list) listValue := reflect.ValueOf(list)
listType := reflect.TypeOf(list) listType := reflect.TypeOf(list)
if listType == nil { if listType == nil {
@ -915,7 +949,6 @@ func containsElement(list interface{}, element interface{}) (ok, found bool) {
} }
} }
return true, false return true, false
} }
// Contains asserts that the specified string, list(array, slice...) or map contains the // Contains asserts that the specified string, list(array, slice...) or map contains the
@ -938,7 +971,6 @@ func Contains(t TestingT, s, contains interface{}, msgAndArgs ...interface{}) bo
} }
return true return true
} }
// NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the // NotContains asserts that the specified string, list(array, slice...) or map does NOT contain the
@ -961,14 +993,17 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
} }
return true return true
} }
// Subset asserts that the specified list(array, slice...) or map contains all // Subset asserts that the list (array, slice, or map) contains all elements
// elements given in the specified subset list(array, slice...) or map. // given in the subset (array, slice, or map).
// Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// assert.Subset(t, [1, 2, 3], [1, 2]) // assert.Subset(t, [1, 2, 3], [1, 2])
// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1}) // assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
// assert.Subset(t, [1, 2, 3], {1: "one", 2: "two"})
// assert.Subset(t, {"x": 1, "y": 2}, ["x"])
func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -983,7 +1018,7 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
} }
subsetKind := reflect.TypeOf(subset).Kind() subsetKind := reflect.TypeOf(subset).Kind()
if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
} }
@ -1007,6 +1042,13 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
} }
subsetList := reflect.ValueOf(subset) subsetList := reflect.ValueOf(subset)
if subsetKind == reflect.Map {
keys := make([]interface{}, subsetList.Len())
for idx, key := range subsetList.MapKeys() {
keys[idx] = key.Interface()
}
subsetList = reflect.ValueOf(keys)
}
for i := 0; i < subsetList.Len(); i++ { for i := 0; i < subsetList.Len(); i++ {
element := subsetList.Index(i).Interface() element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element) ok, found := containsElement(list, element)
@ -1021,12 +1063,15 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
return true return true
} }
// NotSubset asserts that the specified list(array, slice...) or map does NOT // NotSubset asserts that the list (array, slice, or map) does NOT contain all
// contain all elements given in the specified subset list(array, slice...) or // elements given in the subset (array, slice, or map).
// map. // Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// assert.NotSubset(t, [1, 3, 4], [1, 2]) // assert.NotSubset(t, [1, 3, 4], [1, 2])
// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) // assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
// assert.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"})
// assert.NotSubset(t, {"x": 1, "y": 2}, ["z"])
func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1041,7 +1086,7 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
} }
subsetKind := reflect.TypeOf(subset).Kind() subsetKind := reflect.TypeOf(subset).Kind()
if subsetKind != reflect.Array && subsetKind != reflect.Slice && listKind != reflect.Map { if subsetKind != reflect.Array && subsetKind != reflect.Slice && subsetKind != reflect.Map {
return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...) return Fail(t, fmt.Sprintf("%q has an unsupported type %s", subset, subsetKind), msgAndArgs...)
} }
@ -1065,11 +1110,18 @@ func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{})
} }
subsetList := reflect.ValueOf(subset) subsetList := reflect.ValueOf(subset)
if subsetKind == reflect.Map {
keys := make([]interface{}, subsetList.Len())
for idx, key := range subsetList.MapKeys() {
keys[idx] = key.Interface()
}
subsetList = reflect.ValueOf(keys)
}
for i := 0; i < subsetList.Len(); i++ { for i := 0; i < subsetList.Len(); i++ {
element := subsetList.Index(i).Interface() element := subsetList.Index(i).Interface()
ok, found := containsElement(list, element) ok, found := containsElement(list, element)
if !ok { if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", list), msgAndArgs...) return Fail(t, fmt.Sprintf("%q could not be applied builtin len()", list), msgAndArgs...)
} }
if !found { if !found {
return true return true
@ -1591,10 +1643,8 @@ func NoError(t TestingT, err error, msgAndArgs ...interface{}) bool {
// Error asserts that a function returned an error (i.e. not `nil`). // Error asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// if assert.Error(t, err) { // assert.Error(t, err)
// assert.Equal(t, expectedError, err)
// }
func Error(t TestingT, err error, msgAndArgs ...interface{}) bool { func Error(t TestingT, err error, msgAndArgs ...interface{}) bool {
if err == nil { if err == nil {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
@ -1667,7 +1717,6 @@ func matchRegexp(rx interface{}, str interface{}) bool {
default: default:
return r.MatchString(fmt.Sprint(v)) return r.MatchString(fmt.Sprint(v))
} }
} }
// Regexp asserts that a specified regexp matches a string. // Regexp asserts that a specified regexp matches a string.
@ -1703,7 +1752,6 @@ func NotRegexp(t TestingT, rx interface{}, str interface{}, msgAndArgs ...interf
} }
return !match return !match
} }
// Zero asserts that i is the zero value for its type. // Zero asserts that i is the zero value for its type.
@ -1814,6 +1862,11 @@ func JSONEq(t TestingT, expected string, actual string, msgAndArgs ...interface{
return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...) return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid json.\nJSON parsing error: '%s'", expected, err.Error()), msgAndArgs...)
} }
// Shortcut if same bytes
if actual == expected {
return true
}
if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil { if err := json.Unmarshal([]byte(actual), &actualJSONAsInterface); err != nil {
return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...) return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid json.\nJSON parsing error: '%s'", actual, err.Error()), msgAndArgs...)
} }
@ -1832,6 +1885,11 @@ func YAMLEq(t TestingT, expected string, actual string, msgAndArgs ...interface{
return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...) return Fail(t, fmt.Sprintf("Expected value ('%s') is not valid yaml.\nYAML parsing error: '%s'", expected, err.Error()), msgAndArgs...)
} }
// Shortcut if same bytes
if actual == expected {
return true
}
if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil { if err := yaml.Unmarshal([]byte(actual), &actualYAMLAsInterface); err != nil {
return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...) return Fail(t, fmt.Sprintf("Input ('%s') needs to be valid yaml.\nYAML error: '%s'", actual, err.Error()), msgAndArgs...)
} }
@ -1933,6 +1991,7 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
} }
ch := make(chan bool, 1) ch := make(chan bool, 1)
checkCond := func() { ch <- condition() }
timer := time.NewTimer(waitFor) timer := time.NewTimer(waitFor)
defer timer.Stop() defer timer.Stop()
@ -1940,18 +1999,23 @@ func Eventually(t TestingT, condition func() bool, waitFor time.Duration, tick t
ticker := time.NewTicker(tick) ticker := time.NewTicker(tick)
defer ticker.Stop() defer ticker.Stop()
for tick := ticker.C; ; { var tickC <-chan time.Time
// Check the condition once first on the initial call.
go checkCond()
for {
select { select {
case <-timer.C: case <-timer.C:
return Fail(t, "Condition never satisfied", msgAndArgs...) return Fail(t, "Condition never satisfied", msgAndArgs...)
case <-tick: case <-tickC:
tick = nil tickC = nil
go func() { ch <- condition() }() go checkCond()
case v := <-ch: case v := <-ch:
if v { if v {
return true return true
} }
tick = ticker.C tickC = ticker.C
} }
} }
} }
@ -1964,6 +2028,9 @@ type CollectT struct {
errors []error errors []error
} }
// Helper is like [testing.T.Helper] but does nothing.
func (CollectT) Helper() {}
// Errorf collects the error. // Errorf collects the error.
func (c *CollectT) Errorf(format string, args ...interface{}) { func (c *CollectT) Errorf(format string, args ...interface{}) {
c.errors = append(c.errors, fmt.Errorf(format, args...)) c.errors = append(c.errors, fmt.Errorf(format, args...))
@ -2021,35 +2088,42 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time
var lastFinishedTickErrs []error var lastFinishedTickErrs []error
ch := make(chan *CollectT, 1) ch := make(chan *CollectT, 1)
checkCond := func() {
collect := new(CollectT)
defer func() {
ch <- collect
}()
condition(collect)
}
timer := time.NewTimer(waitFor) timer := time.NewTimer(waitFor)
defer timer.Stop() defer timer.Stop()
ticker := time.NewTicker(tick) ticker := time.NewTicker(tick)
defer ticker.Stop() defer ticker.Stop()
for tick := ticker.C; ; { var tickC <-chan time.Time
// Check the condition once first on the initial call.
go checkCond()
for {
select { select {
case <-timer.C: case <-timer.C:
for _, err := range lastFinishedTickErrs { for _, err := range lastFinishedTickErrs {
t.Errorf("%v", err) t.Errorf("%v", err)
} }
return Fail(t, "Condition never satisfied", msgAndArgs...) return Fail(t, "Condition never satisfied", msgAndArgs...)
case <-tick: case <-tickC:
tick = nil tickC = nil
go func() { go checkCond()
collect := new(CollectT)
defer func() {
ch <- collect
}()
condition(collect)
}()
case collect := <-ch: case collect := <-ch:
if !collect.failed() { if !collect.failed() {
return true return true
} }
// Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached. // Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached.
lastFinishedTickErrs = collect.errors lastFinishedTickErrs = collect.errors
tick = ticker.C tickC = ticker.C
} }
} }
} }
@ -2064,6 +2138,7 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D
} }
ch := make(chan bool, 1) ch := make(chan bool, 1)
checkCond := func() { ch <- condition() }
timer := time.NewTimer(waitFor) timer := time.NewTimer(waitFor)
defer timer.Stop() defer timer.Stop()
@ -2071,18 +2146,23 @@ func Never(t TestingT, condition func() bool, waitFor time.Duration, tick time.D
ticker := time.NewTicker(tick) ticker := time.NewTicker(tick)
defer ticker.Stop() defer ticker.Stop()
for tick := ticker.C; ; { var tickC <-chan time.Time
// Check the condition once first on the initial call.
go checkCond()
for {
select { select {
case <-timer.C: case <-timer.C:
return true return true
case <-tick: case <-tickC:
tick = nil tickC = nil
go func() { ch <- condition() }() go checkCond()
case v := <-ch: case v := <-ch:
if v { if v {
return Fail(t, "Condition satisfied", msgAndArgs...) return Fail(t, "Condition satisfied", msgAndArgs...)
} }
tick = ticker.C tickC = ticker.C
} }
} }
} }
@ -2100,9 +2180,12 @@ func ErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
var expectedText string var expectedText string
if target != nil { if target != nil {
expectedText = target.Error() expectedText = target.Error()
if err == nil {
return Fail(t, fmt.Sprintf("Expected error with %q in chain but got nil.", expectedText), msgAndArgs...)
}
} }
chain := buildErrorChainString(err) chain := buildErrorChainString(err, false)
return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+ return Fail(t, fmt.Sprintf("Target error should be in err chain:\n"+
"expected: %q\n"+ "expected: %q\n"+
@ -2125,7 +2208,7 @@ func NotErrorIs(t TestingT, err, target error, msgAndArgs ...interface{}) bool {
expectedText = target.Error() expectedText = target.Error()
} }
chain := buildErrorChainString(err) chain := buildErrorChainString(err, false)
return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
"found: %q\n"+ "found: %q\n"+
@ -2143,11 +2226,17 @@ func ErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interface{
return true return true
} }
chain := buildErrorChainString(err) expectedType := reflect.TypeOf(target).Elem().String()
if err == nil {
return Fail(t, fmt.Sprintf("An error is expected but got nil.\n"+
"expected: %s", expectedType), msgAndArgs...)
}
chain := buildErrorChainString(err, true)
return Fail(t, fmt.Sprintf("Should be in error chain:\n"+ return Fail(t, fmt.Sprintf("Should be in error chain:\n"+
"expected: %q\n"+ "expected: %s\n"+
"in chain: %s", target, chain, "in chain: %s", expectedType, chain,
), msgAndArgs...) ), msgAndArgs...)
} }
@ -2161,24 +2250,46 @@ func NotErrorAs(t TestingT, err error, target interface{}, msgAndArgs ...interfa
return true return true
} }
chain := buildErrorChainString(err) chain := buildErrorChainString(err, true)
return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+ return Fail(t, fmt.Sprintf("Target error should not be in err chain:\n"+
"found: %q\n"+ "found: %s\n"+
"in chain: %s", target, chain, "in chain: %s", reflect.TypeOf(target).Elem().String(), chain,
), msgAndArgs...) ), msgAndArgs...)
} }
func buildErrorChainString(err error) string { func unwrapAll(err error) (errs []error) {
errs = append(errs, err)
switch x := err.(type) {
case interface{ Unwrap() error }:
err = x.Unwrap()
if err == nil {
return
}
errs = append(errs, unwrapAll(err)...)
case interface{ Unwrap() []error }:
for _, err := range x.Unwrap() {
errs = append(errs, unwrapAll(err)...)
}
}
return
}
func buildErrorChainString(err error, withType bool) string {
if err == nil { if err == nil {
return "" return ""
} }
e := errors.Unwrap(err) var chain string
chain := fmt.Sprintf("%q", err.Error()) errs := unwrapAll(err)
for e != nil { for i := range errs {
chain += fmt.Sprintf("\n\t%q", e.Error()) if i != 0 {
e = errors.Unwrap(e) chain += "\n\t"
}
chain += fmt.Sprintf("%q", errs[i].Error())
if withType {
chain += fmt.Sprintf(" (%T)", errs[i])
}
} }
return chain return chain
} }

View File

@ -1,5 +1,9 @@
// Package assert provides a set of comprehensive testing tools for use with the normal Go testing system. // Package assert provides a set of comprehensive testing tools for use with the normal Go testing system.
// //
// # Note
//
// All functions in this package return a bool value indicating whether the assertion has passed.
//
// # Example Usage // # Example Usage
// //
// The following is a complete example using assert in a standard test function: // The following is a complete example using assert in a standard test function:

View File

@ -138,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string,
contains := strings.Contains(body, fmt.Sprint(str)) contains := strings.Contains(body, fmt.Sprint(str))
if !contains { if !contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) Fail(t, fmt.Sprintf("Expected response body for %q to contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...)
} }
return contains return contains
@ -158,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin
contains := strings.Contains(body, fmt.Sprint(str)) contains := strings.Contains(body, fmt.Sprint(str))
if contains { if contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...) Fail(t, fmt.Sprintf("Expected response body for %q to NOT contain %q but found %q", url+"?"+values.Encode(), str, body), msgAndArgs...)
} }
return !contains return !contains

View File

@ -1,5 +1,4 @@
//go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default //go:build testify_yaml_custom && !testify_yaml_fail && !testify_yaml_default
// +build testify_yaml_custom,!testify_yaml_fail,!testify_yaml_default
// Package yaml is an implementation of YAML functions that calls a pluggable implementation. // Package yaml is an implementation of YAML functions that calls a pluggable implementation.
// //

View File

@ -1,5 +1,4 @@
//go:build !testify_yaml_fail && !testify_yaml_custom //go:build !testify_yaml_fail && !testify_yaml_custom
// +build !testify_yaml_fail,!testify_yaml_custom
// Package yaml is just an indirection to handle YAML deserialization. // Package yaml is just an indirection to handle YAML deserialization.
// //

View File

@ -1,5 +1,4 @@
//go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default //go:build testify_yaml_fail && !testify_yaml_custom && !testify_yaml_default
// +build testify_yaml_fail,!testify_yaml_custom,!testify_yaml_default
// Package yaml is an implementation of YAML functions that always fail. // Package yaml is an implementation of YAML functions that always fail.
// //

View File

@ -23,6 +23,8 @@
// //
// The `require` package have same global functions as in the `assert` package, // The `require` package have same global functions as in the `assert` package,
// but instead of returning a boolean result they call `t.FailNow()`. // but instead of returning a boolean result they call `t.FailNow()`.
// A consequence of this is that it must be called from the goroutine running
// the test function, not from other goroutines created during the test.
// //
// Every assertion function also takes an optional string message as the final argument, // Every assertion function also takes an optional string message as the final argument,
// allowing custom error messages to be appended to the message the assertion method outputs. // allowing custom error messages to be appended to the message the assertion method outputs.

View File

@ -117,10 +117,19 @@ func ElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg string
t.FailNow() t.FailNow()
} }
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Empty asserts that the given value is "empty".
// a slice or a channel with len == 0. //
// [Zero values] are "empty".
//
// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
//
// Slices, maps and channels with zero length are "empty".
//
// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
// //
// require.Empty(t, obj) // require.Empty(t, obj)
//
// [Zero values]: https://go.dev/ref/spec#The_zero_value
func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) { func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -131,10 +140,19 @@ func Empty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
t.FailNow() t.FailNow()
} }
// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Emptyf asserts that the given value is "empty".
// a slice or a channel with len == 0. //
// [Zero values] are "empty".
//
// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
//
// Slices, maps and channels with zero length are "empty".
//
// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
// //
// require.Emptyf(t, obj, "error message %s", "formatted") // require.Emptyf(t, obj, "error message %s", "formatted")
//
// [Zero values]: https://go.dev/ref/spec#The_zero_value
func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) { func Emptyf(t TestingT, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -279,10 +297,8 @@ func Equalf(t TestingT, expected interface{}, actual interface{}, msg string, ar
// Error asserts that a function returned an error (i.e. not `nil`). // Error asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// if require.Error(t, err) { // require.Error(t, err)
// require.Equal(t, expectedError, err)
// }
func Error(t TestingT, err error, msgAndArgs ...interface{}) { func Error(t TestingT, err error, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -373,10 +389,8 @@ func ErrorIsf(t TestingT, err error, target error, msg string, args ...interface
// Errorf asserts that a function returned an error (i.e. not `nil`). // Errorf asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// if require.Errorf(t, err, "error message %s", "formatted") { // require.Errorf(t, err, "error message %s", "formatted")
// require.Equal(t, expectedErrorf, err)
// }
func Errorf(t TestingT, err error, msg string, args ...interface{}) { func Errorf(t TestingT, err error, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1097,7 +1111,35 @@ func IsNonIncreasingf(t TestingT, object interface{}, msg string, args ...interf
t.FailNow() t.FailNow()
} }
// IsNotType asserts that the specified objects are not of the same type.
//
// require.IsNotType(t, &NotMyStruct{}, &MyStruct{})
func IsNotType(t TestingT, theType interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.IsNotType(t, theType, object, msgAndArgs...) {
return
}
t.FailNow()
}
// IsNotTypef asserts that the specified objects are not of the same type.
//
// require.IsNotTypef(t, &NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
func IsNotTypef(t TestingT, theType interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.IsNotTypef(t, theType, object, msg, args...) {
return
}
t.FailNow()
}
// IsType asserts that the specified objects are of the same type. // IsType asserts that the specified objects are of the same type.
//
// require.IsType(t, &MyStruct{}, &MyStruct{})
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1109,6 +1151,8 @@ func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs
} }
// IsTypef asserts that the specified objects are of the same type. // IsTypef asserts that the specified objects are of the same type.
//
// require.IsTypef(t, &MyStruct{}, &MyStruct{}, "error message %s", "formatted")
func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) { func IsTypef(t TestingT, expectedType interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1469,8 +1513,7 @@ func NotElementsMatchf(t TestingT, listA interface{}, listB interface{}, msg str
t.FailNow() t.FailNow()
} }
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmpty asserts that the specified object is NOT [Empty].
// a slice or a channel with len == 0.
// //
// if require.NotEmpty(t, obj) { // if require.NotEmpty(t, obj) {
// require.Equal(t, "two", obj[1]) // require.Equal(t, "two", obj[1])
@ -1485,8 +1528,7 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) {
t.FailNow() t.FailNow()
} }
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmptyf asserts that the specified object is NOT [Empty].
// a slice or a channel with len == 0.
// //
// if require.NotEmptyf(t, obj, "error message %s", "formatted") { // if require.NotEmptyf(t, obj, "error message %s", "formatted") {
// require.Equal(t, "two", obj[1]) // require.Equal(t, "two", obj[1])
@ -1745,12 +1787,15 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
t.FailNow() t.FailNow()
} }
// NotSubset asserts that the specified list(array, slice...) or map does NOT // NotSubset asserts that the list (array, slice, or map) does NOT contain all
// contain all elements given in the specified subset list(array, slice...) or // elements given in the subset (array, slice, or map).
// map. // Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// require.NotSubset(t, [1, 3, 4], [1, 2]) // require.NotSubset(t, [1, 3, 4], [1, 2])
// require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3}) // require.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
// require.NotSubset(t, [1, 3, 4], {1: "one", 2: "two"})
// require.NotSubset(t, {"x": 1, "y": 2}, ["z"])
func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1761,12 +1806,15 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i
t.FailNow() t.FailNow()
} }
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT // NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
// contain all elements given in the specified subset list(array, slice...) or // elements given in the subset (array, slice, or map).
// map. // Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted") // require.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
// require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") // require.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
// require.NotSubsetf(t, [1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
// require.NotSubsetf(t, {"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1971,11 +2019,15 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
t.FailNow() t.FailNow()
} }
// Subset asserts that the specified list(array, slice...) or map contains all // Subset asserts that the list (array, slice, or map) contains all elements
// elements given in the specified subset list(array, slice...) or map. // given in the subset (array, slice, or map).
// Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// require.Subset(t, [1, 2, 3], [1, 2]) // require.Subset(t, [1, 2, 3], [1, 2])
// require.Subset(t, {"x": 1, "y": 2}, {"x": 1}) // require.Subset(t, {"x": 1, "y": 2}, {"x": 1})
// require.Subset(t, [1, 2, 3], {1: "one", 2: "two"})
// require.Subset(t, {"x": 1, "y": 2}, ["x"])
func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1986,11 +2038,15 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte
t.FailNow() t.FailNow()
} }
// Subsetf asserts that the specified list(array, slice...) or map contains all // Subsetf asserts that the list (array, slice, or map) contains all elements
// elements given in the specified subset list(array, slice...) or map. // given in the subset (array, slice, or map).
// Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted") // require.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
// require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") // require.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
// require.Subsetf(t, [1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
// require.Subsetf(t, {"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()

View File

@ -93,10 +93,19 @@ func (a *Assertions) ElementsMatchf(listA interface{}, listB interface{}, msg st
ElementsMatchf(a.t, listA, listB, msg, args...) ElementsMatchf(a.t, listA, listB, msg, args...)
} }
// Empty asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Empty asserts that the given value is "empty".
// a slice or a channel with len == 0. //
// [Zero values] are "empty".
//
// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
//
// Slices, maps and channels with zero length are "empty".
//
// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
// //
// a.Empty(obj) // a.Empty(obj)
//
// [Zero values]: https://go.dev/ref/spec#The_zero_value
func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) { func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -104,10 +113,19 @@ func (a *Assertions) Empty(object interface{}, msgAndArgs ...interface{}) {
Empty(a.t, object, msgAndArgs...) Empty(a.t, object, msgAndArgs...)
} }
// Emptyf asserts that the specified object is empty. I.e. nil, "", false, 0 or either // Emptyf asserts that the given value is "empty".
// a slice or a channel with len == 0. //
// [Zero values] are "empty".
//
// Arrays are "empty" if every element is the zero value of the type (stricter than "empty").
//
// Slices, maps and channels with zero length are "empty".
//
// Pointer values are "empty" if the pointer is nil or if the pointed value is "empty".
// //
// a.Emptyf(obj, "error message %s", "formatted") // a.Emptyf(obj, "error message %s", "formatted")
//
// [Zero values]: https://go.dev/ref/spec#The_zero_value
func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) { func (a *Assertions) Emptyf(object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -225,10 +243,8 @@ func (a *Assertions) Equalf(expected interface{}, actual interface{}, msg string
// Error asserts that a function returned an error (i.e. not `nil`). // Error asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// if a.Error(err) { // a.Error(err)
// assert.Equal(t, expectedError, err)
// }
func (a *Assertions) Error(err error, msgAndArgs ...interface{}) { func (a *Assertions) Error(err error, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -298,10 +314,8 @@ func (a *Assertions) ErrorIsf(err error, target error, msg string, args ...inter
// Errorf asserts that a function returned an error (i.e. not `nil`). // Errorf asserts that a function returned an error (i.e. not `nil`).
// //
// actualObj, err := SomeFunction() // actualObj, err := SomeFunction()
// if a.Errorf(err, "error message %s", "formatted") { // a.Errorf(err, "error message %s", "formatted")
// assert.Equal(t, expectedErrorf, err)
// }
func (a *Assertions) Errorf(err error, msg string, args ...interface{}) { func (a *Assertions) Errorf(err error, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -869,7 +883,29 @@ func (a *Assertions) IsNonIncreasingf(object interface{}, msg string, args ...in
IsNonIncreasingf(a.t, object, msg, args...) IsNonIncreasingf(a.t, object, msg, args...)
} }
// IsNotType asserts that the specified objects are not of the same type.
//
// a.IsNotType(&NotMyStruct{}, &MyStruct{})
func (a *Assertions) IsNotType(theType interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
IsNotType(a.t, theType, object, msgAndArgs...)
}
// IsNotTypef asserts that the specified objects are not of the same type.
//
// a.IsNotTypef(&NotMyStruct{}, &MyStruct{}, "error message %s", "formatted")
func (a *Assertions) IsNotTypef(theType interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
IsNotTypef(a.t, theType, object, msg, args...)
}
// IsType asserts that the specified objects are of the same type. // IsType asserts that the specified objects are of the same type.
//
// a.IsType(&MyStruct{}, &MyStruct{})
func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) { func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -878,6 +914,8 @@ func (a *Assertions) IsType(expectedType interface{}, object interface{}, msgAnd
} }
// IsTypef asserts that the specified objects are of the same type. // IsTypef asserts that the specified objects are of the same type.
//
// a.IsTypef(&MyStruct{}, &MyStruct{}, "error message %s", "formatted")
func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) { func (a *Assertions) IsTypef(expectedType interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1163,8 +1201,7 @@ func (a *Assertions) NotElementsMatchf(listA interface{}, listB interface{}, msg
NotElementsMatchf(a.t, listA, listB, msg, args...) NotElementsMatchf(a.t, listA, listB, msg, args...)
} }
// NotEmpty asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmpty asserts that the specified object is NOT [Empty].
// a slice or a channel with len == 0.
// //
// if a.NotEmpty(obj) { // if a.NotEmpty(obj) {
// assert.Equal(t, "two", obj[1]) // assert.Equal(t, "two", obj[1])
@ -1176,8 +1213,7 @@ func (a *Assertions) NotEmpty(object interface{}, msgAndArgs ...interface{}) {
NotEmpty(a.t, object, msgAndArgs...) NotEmpty(a.t, object, msgAndArgs...)
} }
// NotEmptyf asserts that the specified object is NOT empty. I.e. not nil, "", false, 0 or either // NotEmptyf asserts that the specified object is NOT [Empty].
// a slice or a channel with len == 0.
// //
// if a.NotEmptyf(obj, "error message %s", "formatted") { // if a.NotEmptyf(obj, "error message %s", "formatted") {
// assert.Equal(t, "two", obj[1]) // assert.Equal(t, "two", obj[1])
@ -1379,12 +1415,15 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri
NotSamef(a.t, expected, actual, msg, args...) NotSamef(a.t, expected, actual, msg, args...)
} }
// NotSubset asserts that the specified list(array, slice...) or map does NOT // NotSubset asserts that the list (array, slice, or map) does NOT contain all
// contain all elements given in the specified subset list(array, slice...) or // elements given in the subset (array, slice, or map).
// map. // Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// a.NotSubset([1, 3, 4], [1, 2]) // a.NotSubset([1, 3, 4], [1, 2])
// a.NotSubset({"x": 1, "y": 2}, {"z": 3}) // a.NotSubset({"x": 1, "y": 2}, {"z": 3})
// a.NotSubset([1, 3, 4], {1: "one", 2: "two"})
// a.NotSubset({"x": 1, "y": 2}, ["z"])
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1392,12 +1431,15 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs
NotSubset(a.t, list, subset, msgAndArgs...) NotSubset(a.t, list, subset, msgAndArgs...)
} }
// NotSubsetf asserts that the specified list(array, slice...) or map does NOT // NotSubsetf asserts that the list (array, slice, or map) does NOT contain all
// contain all elements given in the specified subset list(array, slice...) or // elements given in the subset (array, slice, or map).
// map. // Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted") // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted") // a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
// a.NotSubsetf([1, 3, 4], {1: "one", 2: "two"}, "error message %s", "formatted")
// a.NotSubsetf({"x": 1, "y": 2}, ["z"], "error message %s", "formatted")
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1557,11 +1599,15 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
Samef(a.t, expected, actual, msg, args...) Samef(a.t, expected, actual, msg, args...)
} }
// Subset asserts that the specified list(array, slice...) or map contains all // Subset asserts that the list (array, slice, or map) contains all elements
// elements given in the specified subset list(array, slice...) or map. // given in the subset (array, slice, or map).
// Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// a.Subset([1, 2, 3], [1, 2]) // a.Subset([1, 2, 3], [1, 2])
// a.Subset({"x": 1, "y": 2}, {"x": 1}) // a.Subset({"x": 1, "y": 2}, {"x": 1})
// a.Subset([1, 2, 3], {1: "one", 2: "two"})
// a.Subset({"x": 1, "y": 2}, ["x"])
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1569,11 +1615,15 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
Subset(a.t, list, subset, msgAndArgs...) Subset(a.t, list, subset, msgAndArgs...)
} }
// Subsetf asserts that the specified list(array, slice...) or map contains all // Subsetf asserts that the list (array, slice, or map) contains all elements
// elements given in the specified subset list(array, slice...) or map. // given in the subset (array, slice, or map).
// Map elements are key-value pairs unless compared with an array or slice where
// only the map key is evaluated.
// //
// a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted") // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted") // a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
// a.Subsetf([1, 2, 3], {1: "one", 2: "two"}, "error message %s", "formatted")
// a.Subsetf({"x": 1, "y": 2}, ["x"], "error message %s", "formatted")
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()

View File

@ -82,7 +82,7 @@ func marshalJSON(id []byte) ([]byte, error) {
} }
// unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes. // unmarshalJSON inflates trace id from hex string, possibly enclosed in quotes.
func unmarshalJSON(dst []byte, src []byte) error { func unmarshalJSON(dst, src []byte) error {
if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' { if l := len(src); l >= 2 && src[0] == '"' && src[l-1] == '"' {
src = src[1 : l-1] src = src[1 : l-1]
} }

View File

@ -41,7 +41,7 @@ func (i *protoInt64) UnmarshalJSON(data []byte) error {
// strings or integers. // strings or integers.
type protoUint64 uint64 type protoUint64 uint64
// Int64 returns the protoUint64 as a uint64. // Uint64 returns the protoUint64 as a uint64.
func (i *protoUint64) Uint64() uint64 { return uint64(*i) } func (i *protoUint64) Uint64() uint64 { return uint64(*i) }
// UnmarshalJSON decodes both strings and integers. // UnmarshalJSON decodes both strings and integers.

View File

@ -10,6 +10,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"io" "io"
"math"
"time" "time"
) )
@ -151,8 +152,8 @@ func (s Span) MarshalJSON() ([]byte, error) {
}{ }{
Alias: Alias(s), Alias: Alias(s),
ParentSpanID: parentSpanId, ParentSpanID: parentSpanId,
StartTime: uint64(startT), StartTime: uint64(startT), // nolint:gosec // >0 checked above.
EndTime: uint64(endT), EndTime: uint64(endT), // nolint:gosec // >0 checked above.
}) })
} }
@ -201,11 +202,13 @@ func (s *Span) UnmarshalJSON(data []byte) error {
case "startTimeUnixNano", "start_time_unix_nano": case "startTimeUnixNano", "start_time_unix_nano":
var val protoUint64 var val protoUint64
err = decoder.Decode(&val) err = decoder.Decode(&val)
s.StartTime = time.Unix(0, int64(val.Uint64())) v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked.
s.StartTime = time.Unix(0, v)
case "endTimeUnixNano", "end_time_unix_nano": case "endTimeUnixNano", "end_time_unix_nano":
var val protoUint64 var val protoUint64
err = decoder.Decode(&val) err = decoder.Decode(&val)
s.EndTime = time.Unix(0, int64(val.Uint64())) v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked.
s.EndTime = time.Unix(0, v)
case "attributes": case "attributes":
err = decoder.Decode(&s.Attrs) err = decoder.Decode(&s.Attrs)
case "droppedAttributesCount", "dropped_attributes_count": case "droppedAttributesCount", "dropped_attributes_count":
@ -248,13 +251,20 @@ func (s *Span) UnmarshalJSON(data []byte) error {
type SpanFlags int32 type SpanFlags int32
const ( const (
// SpanFlagsTraceFlagsMask is a mask for trace-flags.
//
// Bits 0-7 are used for trace flags. // Bits 0-7 are used for trace flags.
SpanFlagsTraceFlagsMask SpanFlags = 255 SpanFlagsTraceFlagsMask SpanFlags = 255
// Bits 8 and 9 are used to indicate that the parent span or link span is remote. // SpanFlagsContextHasIsRemoteMask is a mask for HAS_IS_REMOTE status.
// Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known. //
// Bit 9 (`IS_REMOTE`) indicates whether the span or link is remote. // Bits 8 and 9 are used to indicate that the parent span or link span is
// remote. Bit 8 (`HAS_IS_REMOTE`) indicates whether the value is known.
SpanFlagsContextHasIsRemoteMask SpanFlags = 256 SpanFlagsContextHasIsRemoteMask SpanFlags = 256
// SpanFlagsContextHasIsRemoteMask indicates the Span is remote. // SpanFlagsContextIsRemoteMask is a mask for IS_REMOTE status.
//
// Bits 8 and 9 are used to indicate that the parent span or link span is
// remote. Bit 9 (`IS_REMOTE`) indicates whether the span or link is
// remote.
SpanFlagsContextIsRemoteMask SpanFlags = 512 SpanFlagsContextIsRemoteMask SpanFlags = 512
) )
@ -263,26 +273,30 @@ const (
type SpanKind int32 type SpanKind int32
const ( const (
// Indicates that the span represents an internal operation within an application, // SpanKindInternal indicates that the span represents an internal
// as opposed to an operation happening at the boundaries. Default value. // operation within an application, as opposed to an operation happening at
// the boundaries.
SpanKindInternal SpanKind = 1 SpanKindInternal SpanKind = 1
// Indicates that the span covers server-side handling of an RPC or other // SpanKindServer indicates that the span covers server-side handling of an
// remote network request. // RPC or other remote network request.
SpanKindServer SpanKind = 2 SpanKindServer SpanKind = 2
// Indicates that the span describes a request to some remote service. // SpanKindClient indicates that the span describes a request to some
// remote service.
SpanKindClient SpanKind = 3 SpanKindClient SpanKind = 3
// Indicates that the span describes a producer sending a message to a broker. // SpanKindProducer indicates that the span describes a producer sending a
// Unlike CLIENT and SERVER, there is often no direct critical path latency relationship // message to a broker. Unlike SpanKindClient and SpanKindServer, there is
// between producer and consumer spans. A PRODUCER span ends when the message was accepted // often no direct critical path latency relationship between producer and
// by the broker while the logical processing of the message might span a much longer time. // consumer spans. A SpanKindProducer span ends when the message was
// accepted by the broker while the logical processing of the message might
// span a much longer time.
SpanKindProducer SpanKind = 4 SpanKindProducer SpanKind = 4
// Indicates that the span describes consumer receiving a message from a broker. // SpanKindConsumer indicates that the span describes a consumer receiving
// Like the PRODUCER kind, there is often no direct critical path latency relationship // a message from a broker. Like SpanKindProducer, there is often no direct
// between producer and consumer spans. // critical path latency relationship between producer and consumer spans.
SpanKindConsumer SpanKind = 5 SpanKindConsumer SpanKind = 5
) )
// Event is a time-stamped annotation of the span, consisting of user-supplied // SpanEvent is a time-stamped annotation of the span, consisting of user-supplied
// text description and key-value pairs. // text description and key-value pairs.
type SpanEvent struct { type SpanEvent struct {
// time_unix_nano is the time the event occurred. // time_unix_nano is the time the event occurred.
@ -312,7 +326,7 @@ func (e SpanEvent) MarshalJSON() ([]byte, error) {
Time uint64 `json:"timeUnixNano,omitempty"` Time uint64 `json:"timeUnixNano,omitempty"`
}{ }{
Alias: Alias(e), Alias: Alias(e),
Time: uint64(t), Time: uint64(t), //nolint:gosec // >0 checked above
}) })
} }
@ -347,7 +361,8 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error {
case "timeUnixNano", "time_unix_nano": case "timeUnixNano", "time_unix_nano":
var val protoUint64 var val protoUint64
err = decoder.Decode(&val) err = decoder.Decode(&val)
se.Time = time.Unix(0, int64(val.Uint64())) v := int64(min(val.Uint64(), math.MaxInt64)) //nolint:gosec // Overflow checked.
se.Time = time.Unix(0, v)
case "name": case "name":
err = decoder.Decode(&se.Name) err = decoder.Decode(&se.Name)
case "attributes": case "attributes":
@ -365,10 +380,11 @@ func (se *SpanEvent) UnmarshalJSON(data []byte) error {
return nil return nil
} }
// A pointer from the current span to another span in the same trace or in a // SpanLink is a reference from the current span to another span in the same
// different trace. For example, this can be used in batching operations, // trace or in a different trace. For example, this can be used in batching
// where a single batch handler processes multiple requests from different // operations, where a single batch handler processes multiple requests from
// traces or when the handler receives a request from a different project. // different traces or when the handler receives a request from a different
// project.
type SpanLink struct { type SpanLink struct {
// A unique identifier of a trace that this linked span is part of. The ID is a // A unique identifier of a trace that this linked span is part of. The ID is a
// 16-byte array. // 16-byte array.

View File

@ -3,17 +3,19 @@
package telemetry package telemetry
// StatusCode is the status of a Span.
//
// For the semantics of status codes see // For the semantics of status codes see
// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status
type StatusCode int32 type StatusCode int32
const ( const (
// The default status. // StatusCodeUnset is the default status.
StatusCodeUnset StatusCode = 0 StatusCodeUnset StatusCode = 0
// The Span has been validated by an Application developer or Operator to // StatusCodeOK is used when the Span has been validated by an Application
// have completed successfully. // developer or Operator to have completed successfully.
StatusCodeOK StatusCode = 1 StatusCodeOK StatusCode = 1
// The Span contains an error. // StatusCodeError is used when the Span contains an error.
StatusCodeError StatusCode = 2 StatusCodeError StatusCode = 2
) )

View File

@ -71,7 +71,7 @@ func (td *Traces) UnmarshalJSON(data []byte) error {
return nil return nil
} }
// A collection of ScopeSpans from a Resource. // ResourceSpans is a collection of ScopeSpans from a Resource.
type ResourceSpans struct { type ResourceSpans struct {
// The resource for the spans in this message. // The resource for the spans in this message.
// If this field is not set then no resource info is known. // If this field is not set then no resource info is known.
@ -128,7 +128,7 @@ func (rs *ResourceSpans) UnmarshalJSON(data []byte) error {
return nil return nil
} }
// A collection of Spans produced by an InstrumentationScope. // ScopeSpans is a collection of Spans produced by an InstrumentationScope.
type ScopeSpans struct { type ScopeSpans struct {
// The instrumentation scope information for the spans in this message. // The instrumentation scope information for the spans in this message.
// Semantically when InstrumentationScope isn't set, it is equivalent with // Semantically when InstrumentationScope isn't set, it is equivalent with

View File

@ -1,8 +1,6 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
//go:generate stringer -type=ValueKind -trimprefix=ValueKind
package telemetry package telemetry
import ( import (
@ -23,7 +21,7 @@ import (
// A zero value is valid and represents an empty value. // A zero value is valid and represents an empty value.
type Value struct { type Value struct {
// Ensure forward compatibility by explicitly making this not comparable. // Ensure forward compatibility by explicitly making this not comparable.
noCmp [0]func() //nolint: unused // This is indeed used. noCmp [0]func() //nolint:unused // This is indeed used.
// num holds the value for Int64, Float64, and Bool. It holds the length // num holds the value for Int64, Float64, and Bool. It holds the length
// for String, Bytes, Slice, Map. // for String, Bytes, Slice, Map.
@ -92,7 +90,7 @@ func IntValue(v int) Value { return Int64Value(int64(v)) }
// Int64Value returns a [Value] for an int64. // Int64Value returns a [Value] for an int64.
func Int64Value(v int64) Value { func Int64Value(v int64) Value {
return Value{num: uint64(v), any: ValueKindInt64} return Value{num: uint64(v), any: ValueKindInt64} //nolint:gosec // Raw value conv.
} }
// Float64Value returns a [Value] for a float64. // Float64Value returns a [Value] for a float64.
@ -164,7 +162,7 @@ func (v Value) AsInt64() int64 {
// this will return garbage. // this will return garbage.
func (v Value) asInt64() int64 { func (v Value) asInt64() int64 {
// Assumes v.num was a valid int64 (overflow not checked). // Assumes v.num was a valid int64 (overflow not checked).
return int64(v.num) // nolint: gosec return int64(v.num) //nolint:gosec // Bounded.
} }
// AsBool returns the value held by v as a bool. // AsBool returns the value held by v as a bool.
@ -309,13 +307,13 @@ func (v Value) String() string {
return v.asString() return v.asString()
case ValueKindInt64: case ValueKindInt64:
// Assumes v.num was a valid int64 (overflow not checked). // Assumes v.num was a valid int64 (overflow not checked).
return strconv.FormatInt(int64(v.num), 10) // nolint: gosec return strconv.FormatInt(int64(v.num), 10) //nolint:gosec // Bounded.
case ValueKindFloat64: case ValueKindFloat64:
return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64) return strconv.FormatFloat(v.asFloat64(), 'g', -1, 64)
case ValueKindBool: case ValueKindBool:
return strconv.FormatBool(v.asBool()) return strconv.FormatBool(v.asBool())
case ValueKindBytes: case ValueKindBytes:
return fmt.Sprint(v.asBytes()) return string(v.asBytes())
case ValueKindMap: case ValueKindMap:
return fmt.Sprint(v.asMap()) return fmt.Sprint(v.asMap())
case ValueKindSlice: case ValueKindSlice:
@ -343,7 +341,7 @@ func (v *Value) MarshalJSON() ([]byte, error) {
case ValueKindInt64: case ValueKindInt64:
return json.Marshal(struct { return json.Marshal(struct {
Value string `json:"intValue"` Value string `json:"intValue"`
}{strconv.FormatInt(int64(v.num), 10)}) }{strconv.FormatInt(int64(v.num), 10)}) //nolint:gosec // Raw value conv.
case ValueKindFloat64: case ValueKindFloat64:
return json.Marshal(struct { return json.Marshal(struct {
Value float64 `json:"doubleValue"` Value float64 `json:"doubleValue"`

View File

@ -6,6 +6,7 @@ package sdk
import ( import (
"encoding/json" "encoding/json"
"fmt" "fmt"
"math"
"reflect" "reflect"
"runtime" "runtime"
"strings" "strings"
@ -16,7 +17,7 @@ import (
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/codes"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0" semconv "go.opentelemetry.io/otel/semconv/v1.37.0"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/noop" "go.opentelemetry.io/otel/trace/noop"
@ -85,7 +86,12 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) {
limit := maxSpan.Attrs limit := maxSpan.Attrs
if limit == 0 { if limit == 0 {
// No attributes allowed. // No attributes allowed.
s.span.DroppedAttrs += uint32(len(attrs)) n := int64(len(attrs))
if n > 0 {
s.span.DroppedAttrs += uint32( //nolint:gosec // Bounds checked.
min(n, math.MaxUint32),
)
}
return return
} }
@ -121,8 +127,13 @@ func (s *span) SetAttributes(attrs ...attribute.KeyValue) {
// convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The // convCappedAttrs converts up to limit attrs into a []telemetry.Attr. The
// number of dropped attributes is also returned. // number of dropped attributes is also returned.
func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) { func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, uint32) {
n := len(attrs)
if limit == 0 { if limit == 0 {
return nil, uint32(len(attrs)) var out uint32
if n > 0 {
out = uint32(min(int64(n), math.MaxUint32)) //nolint:gosec // Bounds checked.
}
return nil, out
} }
if limit < 0 { if limit < 0 {
@ -130,8 +141,12 @@ func convCappedAttrs(limit int, attrs []attribute.KeyValue) ([]telemetry.Attr, u
return convAttrs(attrs), 0 return convAttrs(attrs), 0
} }
limit = min(len(attrs), limit) if n < 0 {
return convAttrs(attrs[:limit]), uint32(len(attrs) - limit) n = 0
}
limit = min(n, limit)
return convAttrs(attrs[:limit]), uint32(n - limit) //nolint:gosec // Bounds checked.
} }
func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr { func convAttrs(attrs []attribute.KeyValue) []telemetry.Attr {

View File

@ -5,6 +5,7 @@ package sdk
import ( import (
"context" "context"
"math"
"time" "time"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
@ -21,15 +22,20 @@ type tracer struct {
var _ trace.Tracer = tracer{} var _ trace.Tracer = tracer{}
func (t tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { func (t tracer) Start(
var psc trace.SpanContext ctx context.Context,
name string,
opts ...trace.SpanStartOption,
) (context.Context, trace.Span) {
var psc, sc trace.SpanContext
sampled := true sampled := true
span := new(span) span := new(span)
// Ask eBPF for sampling decision and span context info. // Ask eBPF for sampling decision and span context info.
t.start(ctx, span, &psc, &sampled, &span.spanContext) t.start(ctx, span, &psc, &sampled, &sc)
span.sampled.Store(sampled) span.sampled.Store(sampled)
span.spanContext = sc
ctx = trace.ContextWithSpan(ctx, span) ctx = trace.ContextWithSpan(ctx, span)
@ -58,7 +64,13 @@ func (t *tracer) start(
// start is used for testing. // start is used for testing.
var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {} var start = func(context.Context, *span, *trace.SpanContext, *bool, *trace.SpanContext) {}
func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanContext) (*telemetry.Traces, *telemetry.Span) { var intToUint32Bound = min(math.MaxInt, math.MaxUint32)
func (t tracer) traces(
name string,
cfg trace.SpanConfig,
sc, psc trace.SpanContext,
) (*telemetry.Traces, *telemetry.Span) {
span := &telemetry.Span{ span := &telemetry.Span{
TraceID: telemetry.TraceID(sc.TraceID()), TraceID: telemetry.TraceID(sc.TraceID()),
SpanID: telemetry.SpanID(sc.SpanID()), SpanID: telemetry.SpanID(sc.SpanID()),
@ -73,11 +85,16 @@ func (t tracer) traces(name string, cfg trace.SpanConfig, sc, psc trace.SpanCont
links := cfg.Links() links := cfg.Links()
if limit := maxSpan.Links; limit == 0 { if limit := maxSpan.Links; limit == 0 {
span.DroppedLinks = uint32(len(links)) n := len(links)
if n > 0 {
bounded := max(min(n, intToUint32Bound), 0)
span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked.
}
} else { } else {
if limit > 0 { if limit > 0 {
n := max(len(links)-limit, 0) n := max(len(links)-limit, 0)
span.DroppedLinks = uint32(n) bounded := min(n, intToUint32Bound)
span.DroppedLinks = uint32(bounded) //nolint:gosec // Bounds checked.
links = links[n:] links = links[n:]
} }
span.Links = convLinks(links) span.Links = convLinks(links)

3
vendor/go.opentelemetry.io/otel/.clomonitor.yml generated vendored Normal file
View File

@ -0,0 +1,3 @@
exemptions:
- check: artifacthub_badge
reason: "Artifact Hub doesn't support Go packages"

View File

@ -7,3 +7,5 @@ ans
nam nam
valu valu
thirdparty thirdparty
addOpt
observ

View File

@ -1,252 +1,267 @@
# See https://github.com/golangci/golangci-lint#config-file version: "2"
run: run:
issues-exit-code: 1 #Default issues-exit-code: 1
tests: true #Default tests: true
linters: linters:
# Disable everything by default so upgrades to not include new "default default: none
# enabled" linters.
disable-all: true
# Specifically enable linters we want to use.
enable: enable:
- asasalint - asasalint
- bodyclose - bodyclose
- depguard - depguard
- errcheck - errcheck
- errorlint - errorlint
- gocritic
- godot - godot
- gofumpt
- goimports
- gosec - gosec
- gosimple
- govet - govet
- ineffassign - ineffassign
- misspell - misspell
- modernize
- perfsprint - perfsprint
- revive - revive
- staticcheck - staticcheck
- testifylint - testifylint
- typecheck
- unconvert - unconvert
- unused
- unparam - unparam
- unused
- usestdlibvars - usestdlibvars
- usetesting - usetesting
settings:
depguard:
rules:
auto/sdk:
files:
- '!internal/global/trace.go'
- ~internal/global/trace_test.go
deny:
- pkg: go.opentelemetry.io/auto/sdk
desc: Do not use SDK from automatic instrumentation.
non-tests:
files:
- '!$test'
- '!**/*test/*.go'
- '!**/internal/matchers/*.go'
deny:
- pkg: testing
- pkg: github.com/stretchr/testify
- pkg: crypto/md5
- pkg: crypto/sha1
- pkg: crypto/**/pkix
otel-internal:
files:
- '**/sdk/*.go'
- '**/sdk/**/*.go'
- '**/exporters/*.go'
- '**/exporters/**/*.go'
- '**/schema/*.go'
- '**/schema/**/*.go'
- '**/metric/*.go'
- '**/metric/**/*.go'
- '**/bridge/*.go'
- '**/bridge/**/*.go'
- '**/trace/*.go'
- '**/trace/**/*.go'
- '**/log/*.go'
- '**/log/**/*.go'
deny:
- pkg: go.opentelemetry.io/otel/internal$
desc: Do not use cross-module internal packages.
- pkg: go.opentelemetry.io/otel/internal/internaltest
desc: Do not use cross-module internal packages.
otlp-internal:
files:
- '!**/exporters/otlp/internal/**/*.go'
deny:
- pkg: go.opentelemetry.io/otel/exporters/otlp/internal
desc: Do not use cross-module internal packages.
otlpmetric-internal:
files:
- '!**/exporters/otlp/otlpmetric/internal/*.go'
- '!**/exporters/otlp/otlpmetric/internal/**/*.go'
deny:
- pkg: go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal
desc: Do not use cross-module internal packages.
otlptrace-internal:
files:
- '!**/exporters/otlp/otlptrace/*.go'
- '!**/exporters/otlp/otlptrace/internal/**.go'
deny:
- pkg: go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal
desc: Do not use cross-module internal packages.
gocritic:
disabled-checks:
- appendAssign
- commentedOutCode
- dupArg
- hugeParam
- importShadow
- preferDecodeRune
- rangeValCopy
- unnamedResult
- whyNoLint
enable-all: true
godot:
exclude:
# Exclude links.
- '^ *\[[^]]+\]:'
# Exclude sentence fragments for lists.
- ^[ ]*[-•]
# Exclude sentences prefixing a list.
- :$
misspell:
locale: US
ignore-rules:
- cancelled
modernize:
disable:
- omitzero
perfsprint:
int-conversion: true
err-error: true
errorf: true
sprintf1: true
strconcat: true
revive:
confidence: 0.01
rules:
- name: blank-imports
- name: bool-literal-in-expr
- name: constant-logical-expr
- name: context-as-argument
arguments:
- allowTypesBefore: '*testing.T'
disabled: true
- name: context-keys-type
- name: deep-exit
- name: defer
arguments:
- - call-chain
- loop
- name: dot-imports
- name: duplicated-imports
- name: early-return
arguments:
- preserveScope
- name: empty-block
- name: empty-lines
- name: error-naming
- name: error-return
- name: error-strings
- name: errorf
- name: exported
arguments:
- sayRepetitiveInsteadOfStutters
- name: flag-parameter
- name: identical-branches
- name: if-return
- name: import-shadowing
- name: increment-decrement
- name: indent-error-flow
arguments:
- preserveScope
- name: package-comments
- name: range
- name: range-val-in-closure
- name: range-val-address
- name: redefines-builtin-id
- name: string-format
arguments:
- - panic
- /^[^\n]*$/
- must not contain line breaks
- name: struct-tag
- name: superfluous-else
arguments:
- preserveScope
- name: time-equal
- name: unconditional-recursion
- name: unexported-return
- name: unhandled-error
arguments:
- fmt.Fprint
- fmt.Fprintf
- fmt.Fprintln
- fmt.Print
- fmt.Printf
- fmt.Println
- name: unused-parameter
- name: unused-receiver
- name: unnecessary-stmt
- name: use-any
- name: useless-break
- name: var-declaration
- name: var-naming
arguments:
- ["ID"] # AllowList
- ["Otel", "Aws", "Gcp"] # DenyList
- name: waitgroup-by-value
testifylint:
enable-all: true
disable:
- float-compare
- go-require
- require-error
usetesting:
context-background: true
context-todo: true
exclusions:
generated: lax
presets:
- common-false-positives
- legacy
- std-error-handling
rules:
- linters:
- revive
path: schema/v.*/types/.*
text: avoid meaningless package names
# TODO: Having appropriate comments for exported objects helps development,
# even for objects in internal packages. Appropriate comments for all
# exported objects should be added and this exclusion removed.
- linters:
- revive
path: .*internal/.*
text: exported (method|function|type|const) (.+) should have comment or be unexported
# Yes, they are, but it's okay in a test.
- linters:
- revive
path: _test\.go
text: exported func.*returns unexported type.*which can be annoying to use
# Example test functions should be treated like main.
- linters:
- revive
path: example.*_test\.go
text: calls to (.+) only in main[(][)] or init[(][)] functions
# It's okay to not run gosec and perfsprint in a test.
- linters:
- gosec
- perfsprint
path: _test\.go
# Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
# as we commonly use it in tests and examples.
- linters:
- gosec
text: 'G404:'
# Ignoring gosec G402: TLS MinVersion too low
# as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
- linters:
- gosec
text: 'G402: TLS MinVersion too low.'
issues: issues:
# Maximum issues count per one linter.
# Set to 0 to disable.
# Default: 50
# Setting to unlimited so the linter only is run once to debug all issues.
max-issues-per-linter: 0 max-issues-per-linter: 0
# Maximum count of issues with the same text.
# Set to 0 to disable.
# Default: 3
# Setting to unlimited so the linter only is run once to debug all issues.
max-same-issues: 0 max-same-issues: 0
# Excluding configuration per-path, per-linter, per-text and per-source. formatters:
exclude-rules: enable:
# TODO: Having appropriate comments for exported objects helps development, - gofumpt
# even for objects in internal packages. Appropriate comments for all - goimports
# exported objects should be added and this exclusion removed. - golines
- path: '.*internal/.*' settings:
text: "exported (method|function|type|const) (.+) should have comment or be unexported" gofumpt:
linters: extra-rules: true
- revive goimports:
# Yes, they are, but it's okay in a test. local-prefixes:
- path: _test\.go - go.opentelemetry.io/otel
text: "exported func.*returns unexported type.*which can be annoying to use" golines:
linters: max-len: 120
- revive exclusions:
# Example test functions should be treated like main. generated: lax
- path: example.*_test\.go
text: "calls to (.+) only in main[(][)] or init[(][)] functions"
linters:
- revive
# It's okay to not run gosec and perfsprint in a test.
- path: _test\.go
linters:
- gosec
- perfsprint
# Ignoring gosec G404: Use of weak random number generator (math/rand instead of crypto/rand)
# as we commonly use it in tests and examples.
- text: "G404:"
linters:
- gosec
# Ignoring gosec G402: TLS MinVersion too low
# as the https://pkg.go.dev/crypto/tls#Config handles MinVersion default well.
- text: "G402: TLS MinVersion too low."
linters:
- gosec
include:
# revive exported should have comment or be unexported.
- EXC0012
# revive package comment should be of the form ...
- EXC0013
linters-settings:
depguard:
rules:
non-tests:
files:
- "!$test"
- "!**/*test/*.go"
- "!**/internal/matchers/*.go"
deny:
- pkg: "testing"
- pkg: "github.com/stretchr/testify"
- pkg: "crypto/md5"
- pkg: "crypto/sha1"
- pkg: "crypto/**/pkix"
auto/sdk:
files:
- "!internal/global/trace.go"
- "~internal/global/trace_test.go"
deny:
- pkg: "go.opentelemetry.io/auto/sdk"
desc: Do not use SDK from automatic instrumentation.
otlp-internal:
files:
- "!**/exporters/otlp/internal/**/*.go"
deny:
- pkg: "go.opentelemetry.io/otel/exporters/otlp/internal"
desc: Do not use cross-module internal packages.
otlptrace-internal:
files:
- "!**/exporters/otlp/otlptrace/*.go"
- "!**/exporters/otlp/otlptrace/internal/**.go"
deny:
- pkg: "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal"
desc: Do not use cross-module internal packages.
otlpmetric-internal:
files:
- "!**/exporters/otlp/otlpmetric/internal/*.go"
- "!**/exporters/otlp/otlpmetric/internal/**/*.go"
deny:
- pkg: "go.opentelemetry.io/otel/exporters/otlp/otlpmetric/internal"
desc: Do not use cross-module internal packages.
otel-internal:
files:
- "**/sdk/*.go"
- "**/sdk/**/*.go"
- "**/exporters/*.go"
- "**/exporters/**/*.go"
- "**/schema/*.go"
- "**/schema/**/*.go"
- "**/metric/*.go"
- "**/metric/**/*.go"
- "**/bridge/*.go"
- "**/bridge/**/*.go"
- "**/trace/*.go"
- "**/trace/**/*.go"
- "**/log/*.go"
- "**/log/**/*.go"
deny:
- pkg: "go.opentelemetry.io/otel/internal$"
desc: Do not use cross-module internal packages.
- pkg: "go.opentelemetry.io/otel/internal/attribute"
desc: Do not use cross-module internal packages.
- pkg: "go.opentelemetry.io/otel/internal/internaltest"
desc: Do not use cross-module internal packages.
- pkg: "go.opentelemetry.io/otel/internal/matchers"
desc: Do not use cross-module internal packages.
godot:
exclude:
# Exclude links.
- '^ *\[[^]]+\]:'
# Exclude sentence fragments for lists.
- '^[ ]*[-•]'
# Exclude sentences prefixing a list.
- ':$'
goimports:
local-prefixes: go.opentelemetry.io
misspell:
locale: US
ignore-words:
- cancelled
perfsprint:
err-error: true
errorf: true
int-conversion: true
sprintf1: true
strconcat: true
revive:
# Sets the default failure confidence.
# This means that linting errors with less than 0.8 confidence will be ignored.
# Default: 0.8
confidence: 0.01
# https://github.com/mgechev/revive/blob/master/RULES_DESCRIPTIONS.md
rules:
- name: blank-imports
- name: bool-literal-in-expr
- name: constant-logical-expr
- name: context-as-argument
disabled: true
arguments:
- allowTypesBefore: "*testing.T"
- name: context-keys-type
- name: deep-exit
- name: defer
arguments:
- ["call-chain", "loop"]
- name: dot-imports
- name: duplicated-imports
- name: early-return
arguments:
- "preserveScope"
- name: empty-block
- name: empty-lines
- name: error-naming
- name: error-return
- name: error-strings
- name: errorf
- name: exported
arguments:
- "sayRepetitiveInsteadOfStutters"
- name: flag-parameter
- name: identical-branches
- name: if-return
- name: import-shadowing
- name: increment-decrement
- name: indent-error-flow
arguments:
- "preserveScope"
- name: package-comments
- name: range
- name: range-val-in-closure
- name: range-val-address
- name: redefines-builtin-id
- name: string-format
arguments:
- - panic
- '/^[^\n]*$/'
- must not contain line breaks
- name: struct-tag
- name: superfluous-else
arguments:
- "preserveScope"
- name: time-equal
- name: unconditional-recursion
- name: unexported-return
- name: unhandled-error
arguments:
- "fmt.Fprint"
- "fmt.Fprintf"
- "fmt.Fprintln"
- "fmt.Print"
- "fmt.Printf"
- "fmt.Println"
- name: unnecessary-stmt
- name: useless-break
- name: var-declaration
- name: var-naming
arguments:
- ["ID"] # AllowList
- ["Otel", "Aws", "Gcp"] # DenyList
- name: waitgroup-by-value
testifylint:
enable-all: true
disable:
- float-compare
- go-require
- require-error

View File

@ -1,6 +1,13 @@
http://localhost http://localhost
https://localhost
http://jaeger-collector http://jaeger-collector
https://github.com/open-telemetry/opentelemetry-go/milestone/ https://github.com/open-telemetry/opentelemetry-go/milestone/
https://github.com/open-telemetry/opentelemetry-go/projects https://github.com/open-telemetry/opentelemetry-go/projects
# Weaver model URL for semantic-conventions repository.
https?:\/\/github\.com\/open-telemetry\/semantic-conventions\/archive\/refs\/tags\/[^.]+\.zip\[[^]]+]
file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries file:///home/runner/work/opentelemetry-go/opentelemetry-go/libraries
file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual file:///home/runner/work/opentelemetry-go/opentelemetry-go/manual
http://4.3.2.1:78/user/123
file:///home/runner/work/opentelemetry-go/opentelemetry-go/exporters/otlp/otlptrace/otlptracegrpc/internal/observ/dns:/:4317
# URL works, but it has blocked link checkers.
https://dl.acm.org/doi/10.1145/198429.198435

View File

@ -11,6 +11,304 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
<!-- Released section --> <!-- Released section -->
<!-- Don't change this section unless doing release --> <!-- Don't change this section unless doing release -->
## [1.40.0/0.62.0/0.16.0] 2026-02-02
### Added
- Add `AlwaysRecord` sampler in `go.opentelemetry.io/otel/sdk/trace`. (#7724)
- Add `Enabled` method to all synchronous instrument interfaces (`Float64Counter`, `Float64UpDownCounter`, `Float64Histogram`, `Float64Gauge`, `Int64Counter`, `Int64UpDownCounter`, `Int64Histogram`, `Int64Gauge`) in `go.opentelemetry.io/otel/metric`.
This stabilizes the synchronous instrument enabled feature, allowing users to check if an instrument will process measurements before performing computationally expensive operations. (#7763)
- Add `go.opentelemetry.io/otel/semconv/v1.39.0` package.
The package contains semantic conventions from the `v1.39.0` version of the OpenTelemetry Semantic Conventions.
See the [migration documentation](./semconv/v1.39.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.38.0`. (#7783, #7789)
### Changed
- Improve the concurrent performance of `HistogramReservoir` in `go.opentelemetry.io/otel/sdk/metric/exemplar` by 4x. (#7443)
- Improve the concurrent performance of `FixedSizeReservoir` in `go.opentelemetry.io/otel/sdk/metric/exemplar`. (#7447)
- Improve performance of concurrent histogram measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7474)
- Improve performance of concurrent synchronous gauge measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7478)
- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`. (#7492)
- `Exporter` in `go.opentelemetry.io/otel/exporters/prometheus` ignores metrics with the scope `go.opentelemetry.io/contrib/bridges/prometheus`.
This prevents scrape failures when the Prometheus exporter is misconfigured to get data from the Prometheus bridge. (#7688)
- Improve performance of concurrent exponential histogram measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7702)
- The `rpc.grpc.status_code` attribute in the experimental metrics emitted from `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` is replaced with the `rpc.response.status_code` attribute to align with the semantic conventions. (#7854)
- The `rpc.grpc.status_code` attribute in the experimental metrics emitted from `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` is replaced with the `rpc.response.status_code` attribute to align with the semantic conventions. (#7854)
### Fixed
- Fix bad log message when key-value pairs are dropped because of key duplication in `go.opentelemetry.io/otel/sdk/log`. (#7662)
- Fix `DroppedAttributes` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not count the non-attribute key-value pairs dropped because of key duplication. (#7662)
- Fix `SetAttributes` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not log that attributes are dropped when they are actually not dropped. (#7662)
- Fix missing `request.GetBody` in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` to correctly handle HTTP/2 `GOAWAY` frame. (#7794)
- `WithHostID` detector in `go.opentelemetry.io/otel/sdk/resource` to use full path for `ioreg` command on Darwin (macOS). (#7818)
### Deprecated
- Deprecate `go.opentelemetry.io/otel/exporters/zipkin`.
For more information, see the [OTel blog post deprecating the Zipkin exporter](https://opentelemetry.io/blog/2025/deprecating-zipkin-exporters/). (#7670)
## [1.39.0/0.61.0/0.15.0/0.0.14] 2025-12-05
### Added
- Greatly reduce the cost of recording metrics in `go.opentelemetry.io/otel/sdk/metric` using hashing for map keys. (#7175)
- Add `WithInstrumentationAttributeSet` option to `go.opentelemetry.io/otel/log`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/trace` packages.
This provides a concurrent-safe and performant alternative to `WithInstrumentationAttributes` by accepting a pre-constructed `attribute.Set`. (#7287)
- Add experimental observability for the Prometheus exporter in `go.opentelemetry.io/otel/exporters/prometheus`.
Check the `go.opentelemetry.io/otel/exporters/prometheus/internal/x` package documentation for more information. (#7345)
- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7353)
- Add temporality selector functions `DeltaTemporalitySelector`, `CumulativeTemporalitySelector`, `LowMemoryTemporalitySelector` to `go.opentelemetry.io/otel/sdk/metric`. (#7434)
- Add experimental observability metrics for simple log processor in `go.opentelemetry.io/otel/sdk/log`. (#7548)
- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7459)
- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7486)
- Add experimental observability metrics for simple span processor in `go.opentelemetry.io/otel/sdk/trace`. (#7374)
- Add experimental observability metrics in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7512)
- Add experimental observability metrics for manual reader in `go.opentelemetry.io/otel/sdk/metric`. (#7524)
- Add experimental observability metrics for periodic reader in `go.opentelemetry.io/otel/sdk/metric`. (#7571)
- Support `OTEL_EXPORTER_OTLP_LOGS_INSECURE` and `OTEL_EXPORTER_OTLP_INSECURE` environmental variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7608)
- Add `Enabled` method to the `Processor` interface in `go.opentelemetry.io/otel/sdk/log`.
All `Processor` implementations now include an `Enabled` method. (#7639)
- The `go.opentelemetry.io/otel/semconv/v1.38.0` package.
The package contains semantic conventions from the `v1.38.0` version of the OpenTelemetry Semantic Conventions.
See the [migration documentation](./semconv/v1.38.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7648)
### Changed
- `Distinct` in `go.opentelemetry.io/otel/attribute` is no longer guaranteed to uniquely identify an attribute set.
Collisions between `Distinct` values for different Sets are possible with extremely high cardinality (billions of series per instrument), but are highly unlikely. (#7175)
- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/trace` synchronously de-duplicates the passed attributes instead of delegating it to the returned `TracerOption`. (#7266)
- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/meter` synchronously de-duplicates the passed attributes instead of delegating it to the returned `MeterOption`. (#7266)
- `WithInstrumentationAttributes` in `go.opentelemetry.io/otel/log` synchronously de-duplicates the passed attributes instead of delegating it to the returned `LoggerOption`. (#7266)
- Rename the `OTEL_GO_X_SELF_OBSERVABILITY` environment variable to `OTEL_GO_X_OBSERVABILITY` in `go.opentelemetry.io/otel/sdk/trace`, `go.opentelemetry.io/otel/sdk/log`, and `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7302)
- Improve performance of histogram `Record` in `go.opentelemetry.io/otel/sdk/metric` when min and max are disabled using `NoMinMax`. (#7306)
- Improve error handling for dropped data during translation by using `prometheus.NewInvalidMetric` in `go.opentelemetry.io/otel/exporters/prometheus`.
⚠️ **Breaking Change:** Previously, these cases were only logged and scrapes succeeded.
Now, when translation would drop data (e.g., invalid label/value), the exporter emits a `NewInvalidMetric`, and Prometheus scrapes **fail with HTTP 500** by default.
To preserve the prior behavior (scrapes succeed while errors are logged), configure your Prometheus HTTP handler with: `promhttp.HandlerOpts{ ErrorHandling: promhttp.ContinueOnError }`. (#7363)
- Replace fnv hash with xxhash in `go.opentelemetry.io/otel/attribute` for better performance. (#7371)
- The default `TranslationStrategy` in `go.opentelemetry.io/exporters/prometheus` is changed from `otlptranslator.NoUTF8EscapingWithSuffixes` to `otlptranslator.UnderscoreEscapingWithSuffixes`. (#7421)
- Improve performance of concurrent measurements in `go.opentelemetry.io/otel/sdk/metric`. (#7427)
- Include W3C TraceFlags (bits 0-7) in the OTLP `Span.Flags` field in `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracehttp` and `go.opentelemetry.io/exporters/otlp/otlptrace/otlptracegrpc`. (#7438)
- The `ErrorType` function in `go.opentelemetry.io/otel/semconv/v1.37.0` now handles custom error types.
If an error implements an `ErrorType() string` method, the return value of that method will be used as the error type. (#7442)
### Fixed
- Fix `WithInstrumentationAttributes` options in `go.opentelemetry.io/otel/trace`, `go.opentelemetry.io/otel/metric`, and `go.opentelemetry.io/otel/log` to properly merge attributes when passed multiple times instead of replacing them.
Attributes with duplicate keys will use the last value passed. (#7300)
- The equality of `attribute.Set` when using the `Equal` method is not affected by the user overriding the empty set pointed to by `attribute.EmptySet` in `go.opentelemetry.io/otel/attribute`. (#7357)
- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#7372)
- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#7372)
- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#7372)
- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#7372)
- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#7372)
- Return partial OTLP export errors to the caller in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#7372)
- Fix `AddAttributes`, `SetAttributes`, `SetBody` on `Record` in `go.opentelemetry.io/otel/sdk/log` to not mutate input. (#7403)
- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.37.0`. (#7655)
- Do not double record measurements of `RecordSet` methods in `go.opentelemetry.io/otel/semconv/v1.36.0`. (#7656)
### Removed
- Drop support for [Go 1.23]. (#7274)
- Remove the `FilterProcessor` interface in `go.opentelemetry.io/otel/sdk/log`.
The `Enabled` method has been added to the `Processor` interface instead.
All `Processor` implementations must now implement the `Enabled` method.
Custom processors that do not filter records can implement `Enabled` to return `true`. (#7639)
## [1.38.0/0.60.0/0.14.0/0.0.13] 2025-08-29
This release is the last to support [Go 1.23].
The next release will require at least [Go 1.24].
### Added
- Add native histogram exemplar support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6772)
- Add template attribute functions to the `go.opentelemetry.io/otel/semconv/v1.34.0` package. (#6939)
- `ContainerLabel`
- `DBOperationParameter`
- `DBSystemParameter`
- `HTTPRequestHeader`
- `HTTPResponseHeader`
- `K8SCronJobAnnotation`
- `K8SCronJobLabel`
- `K8SDaemonSetAnnotation`
- `K8SDaemonSetLabel`
- `K8SDeploymentAnnotation`
- `K8SDeploymentLabel`
- `K8SJobAnnotation`
- `K8SJobLabel`
- `K8SNamespaceAnnotation`
- `K8SNamespaceLabel`
- `K8SNodeAnnotation`
- `K8SNodeLabel`
- `K8SPodAnnotation`
- `K8SPodLabel`
- `K8SReplicaSetAnnotation`
- `K8SReplicaSetLabel`
- `K8SStatefulSetAnnotation`
- `K8SStatefulSetLabel`
- `ProcessEnvironmentVariable`
- `RPCConnectRPCRequestMetadata`
- `RPCConnectRPCResponseMetadata`
- `RPCGRPCRequestMetadata`
- `RPCGRPCResponseMetadata`
- Add `ErrorType` attribute helper function to the `go.opentelemetry.io/otel/semconv/v1.34.0` package. (#6962)
- Add `WithAllowKeyDuplication` in `go.opentelemetry.io/otel/sdk/log` which can be used to disable deduplication for log records. (#6968)
- Add `WithCardinalityLimit` option to configure the cardinality limit in `go.opentelemetry.io/otel/sdk/metric`. (#6996, #7065, #7081, #7164, #7165, #7179)
- Add `Clone` method to `Record` in `go.opentelemetry.io/otel/log` that returns a copy of the record with no shared state. (#7001)
- Add experimental self-observability span and batch span processor metrics in `go.opentelemetry.io/otel/sdk/trace`.
Check the `go.opentelemetry.io/otel/sdk/trace/internal/x` package documentation for more information. (#7027, #6393, #7209)
- The `go.opentelemetry.io/otel/semconv/v1.36.0` package.
The package contains semantic conventions from the `v1.36.0` version of the OpenTelemetry Semantic Conventions.
See the [migration documentation](./semconv/v1.36.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.34.0`. (#7032, #7041)
- Add support for configuring Prometheus name translation using `WithTranslationStrategy` option in `go.opentelemetry.io/otel/exporters/prometheus`. The current default translation strategy when UTF-8 mode is enabled is `NoUTF8EscapingWithSuffixes`, but a future release will change the default strategy to `UnderscoreEscapingWithSuffixes` for compliance with the specification. (#7111)
- Add experimental self-observability log metrics in `go.opentelemetry.io/otel/sdk/log`.
Check the `go.opentelemetry.io/otel/sdk/log/internal/x` package documentation for more information. (#7121)
- Add experimental self-observability trace exporter metrics in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`.
Check the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace/internal/x` package documentation for more information. (#7133)
- Support testing of [Go 1.25]. (#7187)
- The `go.opentelemetry.io/otel/semconv/v1.37.0` package.
The package contains semantic conventions from the `v1.37.0` version of the OpenTelemetry Semantic Conventions.
See the [migration documentation](./semconv/v1.37.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.36.0`. (#7254)
### Changed
- Optimize `TraceIDFromHex` and `SpanIDFromHex` in `go.opentelemetry.io/otel/sdk/trace`. (#6791)
- Change `AssertEqual` in `go.opentelemetry.io/otel/log/logtest` to accept `TestingT` in order to support benchmarks and fuzz tests. (#6908)
- Change `DefaultExemplarReservoirProviderSelector` in `go.opentelemetry.io/otel/sdk/metric` to use `runtime.GOMAXPROCS(0)` instead of `runtime.NumCPU()` for the `FixedSizeReservoirProvider` default size. (#7094)
### Fixed
- `SetBody` method of `Record` in `go.opentelemetry.io/otel/sdk/log` now deduplicates key-value collections (`log.Value` of `log.KindMap` from `go.opentelemetry.io/otel/log`). (#7002)
- Fix `go.opentelemetry.io/otel/exporters/prometheus` to not append a suffix if it's already present in metric name. (#7088)
- Fix the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` self-observability component type and name. (#7195)
- Fix partial export count metric in `go.opentelemetry.io/otel/exporters/stdout/stdouttrace`. (#7199)
### Deprecated
- Deprecate `WithoutUnits` and `WithoutCounterSuffixes` options, preferring `WithTranslationStrategy` instead. (#7111)
- Deprecate support for `OTEL_GO_X_CARDINALITY_LIMIT` environment variable in `go.opentelemetry.io/otel/sdk/metric`. Use `WithCardinalityLimit` option instead. (#7166)
## [0.59.1] 2025-07-21
### Changed
- Retract `v0.59.0` release of `go.opentelemetry.io/otel/exporters/prometheus` module which appends incorrect unit suffixes. (#7046)
- Change `go.opentelemetry.io/otel/exporters/prometheus` to no longer deduplicate suffixes when UTF8 is enabled.
It is recommended to disable unit and counter suffixes in the exporter, and manually add suffixes if you rely on the existing behavior. (#7044)
### Fixed
- Fix `go.opentelemetry.io/otel/exporters/prometheus` to properly handle unit suffixes when the unit is in brackets.
E.g. `{spans}`. (#7044)
## [1.37.0/0.59.0/0.13.0] 2025-06-25
### Added
- The `go.opentelemetry.io/otel/semconv/v1.33.0` package.
The package contains semantic conventions from the `v1.33.0` version of the OpenTelemetry Semantic Conventions.
See the [migration documentation](./semconv/v1.33.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.32.0`. (#6799)
- The `go.opentelemetry.io/otel/semconv/v1.34.0` package.
The package contains semantic conventions from the `v1.34.0` version of the OpenTelemetry Semantic Conventions. (#6812)
- Add metric's schema URL as `otel_scope_schema_url` label in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947)
- Add metric's scope attributes as `otel_scope_[attribute]` labels in `go.opentelemetry.io/otel/exporters/prometheus`. (#5947)
- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/log`. (#6825)
- Add `EventName` to `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6825)
- Changed handling of `go.opentelemetry.io/otel/exporters/prometheus` metric renaming to add unit suffixes when it doesn't match one of the pre-defined values in the unit suffix map. (#6839)
### Changed
- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/bridge/opentracing`. (#6827)
- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/exporters/zipkin`. (#6829)
- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/metric`. (#6832)
- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/resource`. (#6834)
- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/sdk/trace`. (#6835)
- The semantic conventions have been upgraded from `v1.26.0` to `v1.34.0` in `go.opentelemetry.io/otel/trace`. (#6836)
- `Record.Resource` now returns `*resource.Resource` instead of `resource.Resource` in `go.opentelemetry.io/otel/sdk/log`. (#6864)
- Retry now shows error cause for context timeout in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`, `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`, `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`, `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6898)
### Fixed
- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc`. (#6710)
- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6710)
- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#6710)
- Stop stripping trailing slashes from configured endpoint URL in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6710)
- Validate exponential histogram scale range for Prometheus compatibility in `go.opentelemetry.io/otel/exporters/prometheus`. (#6822)
- Context cancellation during metric pipeline produce does not corrupt data in `go.opentelemetry.io/otel/sdk/metric`. (#6914)
### Removed
- `go.opentelemetry.io/otel/exporters/prometheus` no longer exports `otel_scope_info` metric. (#6770)
## [0.12.2] 2025-05-22
### Fixed
- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` module that contains invalid dependencies. (#6804)
- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp` module that contains invalid dependencies. (#6804)
- Retract `v0.12.0` release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog` module that contains invalid dependencies. (#6804)
## [0.12.1] 2025-05-21
### Fixes
- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc`. (#6800)
- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6800)
- Use the proper dependency version of `go.opentelemetry.io/otel/sdk/log/logtest` in `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`. (#6800)
## [1.36.0/0.58.0/0.12.0] 2025-05-20
### Added
- Add exponential histogram support in `go.opentelemetry.io/otel/exporters/prometheus`. (#6421)
- The `go.opentelemetry.io/otel/semconv/v1.31.0` package.
The package contains semantic conventions from the `v1.31.0` version of the OpenTelemetry Semantic Conventions.
See the [migration documentation](./semconv/v1.31.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.30.0`. (#6479)
- Add `Recording`, `Scope`, and `Record` types in `go.opentelemetry.io/otel/log/logtest`. (#6507)
- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp`. (#6751)
- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#6752)
- Add `WithHTTPClient` option to configure the `http.Client` used by `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6688)
- Add `ValuesGetter` in `go.opentelemetry.io/otel/propagation`, a `TextMapCarrier` that supports retrieving multiple values for a single key. (#5973)
- Add `Values` method to `HeaderCarrier` to implement the new `ValuesGetter` interface in `go.opentelemetry.io/otel/propagation`. (#5973)
- Update `Baggage` in `go.opentelemetry.io/otel/propagation` to retrieve multiple values for a key when the carrier implements `ValuesGetter`. (#5973)
- Add `AssertEqual` function in `go.opentelemetry.io/otel/log/logtest`. (#6662)
- The `go.opentelemetry.io/otel/semconv/v1.32.0` package.
The package contains semantic conventions from the `v1.32.0` version of the OpenTelemetry Semantic Conventions.
See the [migration documentation](./semconv/v1.32.0/MIGRATION.md) for information on how to upgrade from `go.opentelemetry.io/otel/semconv/v1.31.0`. (#6782)
- Add `Transform` option in `go.opentelemetry.io/otel/log/logtest`. (#6794)
- Add `Desc` option in `go.opentelemetry.io/otel/log/logtest`. (#6796)
### Removed
- Drop support for [Go 1.22]. (#6381, #6418)
- Remove `Resource` field from `EnabledParameters` in `go.opentelemetry.io/otel/sdk/log`. (#6494)
- Remove `RecordFactory` type from `go.opentelemetry.io/otel/log/logtest`. (#6492)
- Remove `ScopeRecords`, `EmittedRecord`, and `RecordFactory` types from `go.opentelemetry.io/otel/log/logtest`. (#6507)
- Remove `AssertRecordEqual` function in `go.opentelemetry.io/otel/log/logtest`, use `AssertEqual` instead. (#6662)
### Changed
- ⚠️ Update `github.com/prometheus/client_golang` to `v1.21.1`, which changes the `NameValidationScheme` to `UTF8Validation`.
This allows metrics names to keep original delimiters (e.g. `.`), rather than replacing with underscores.
This can be reverted by setting `github.com/prometheus/common/model.NameValidationScheme` to `LegacyValidation` in `github.com/prometheus/common/model`. (#6433)
- Initialize map with `len(keys)` in `NewAllowKeysFilter` and `NewDenyKeysFilter` to avoid unnecessary allocations in `go.opentelemetry.io/otel/attribute`. (#6455)
- `go.opentelemetry.io/otel/log/logtest` is now a separate Go module. (#6465)
- `go.opentelemetry.io/otel/sdk/log/logtest` is now a separate Go module. (#6466)
- `Recorder` in `go.opentelemetry.io/otel/log/logtest` no longer separately stores records emitted by loggers with the same instrumentation scope. (#6507)
- Improve performance of `BatchProcessor` in `go.opentelemetry.io/otel/sdk/log` by not exporting when exporter cannot accept more. (#6569, #6641)
### Deprecated
- Deprecate support for `model.LegacyValidation` for `go.opentelemetry.io/otel/exporters/prometheus`. (#6449)
### Fixes
- Stop percent encoding header environment variables in `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`. (#6392)
- Ensure the `noopSpan.tracerProvider` method is not inlined in `go.opentelemetry.io/otel/trace` so the `go.opentelemetry.io/auto` instrumentation can instrument non-recording spans. (#6456)
- Use a `sync.Pool` instead of allocating `metricdata.ResourceMetrics` in `go.opentelemetry.io/otel/exporters/prometheus`. (#6472)
## [1.35.0/0.57.0/0.11.0] 2025-03-05 ## [1.35.0/0.57.0/0.11.0] 2025-03-05
This release is the last to support [Go 1.22]. This release is the last to support [Go 1.22].
@ -3237,7 +3535,15 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files. - CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project. - CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.35.0...HEAD [Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.40.0...HEAD
[1.40.0/0.62.0/0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.40.0
[1.39.0/0.61.0/0.15.0/0.0.14]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.39.0
[1.38.0/0.60.0/0.14.0/0.0.13]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.38.0
[0.59.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/exporters/prometheus/v0.59.1
[1.37.0/0.59.0/0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.37.0
[0.12.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.2
[0.12.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/log/v0.12.1
[1.36.0/0.58.0/0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.36.0
[1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0 [1.35.0/0.57.0/0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.35.0
[1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0 [1.34.0/0.56.0/0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.34.0
[1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0 [1.33.0/0.55.0/0.9.0/0.0.12]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.33.0
@ -3329,6 +3635,7 @@ It contains api and sdk for trace and meter.
<!-- Released section ended --> <!-- Released section ended -->
[Go 1.25]: https://go.dev/doc/go1.25
[Go 1.24]: https://go.dev/doc/go1.24 [Go 1.24]: https://go.dev/doc/go1.24
[Go 1.23]: https://go.dev/doc/go1.23 [Go 1.23]: https://go.dev/doc/go1.23
[Go 1.22]: https://go.dev/doc/go1.22 [Go 1.22]: https://go.dev/doc/go1.22

View File

@ -12,6 +12,6 @@
# https://help.github.com/en/articles/about-code-owners # https://help.github.com/en/articles/about-code-owners
# #
* @MrAlias @XSAM @dashpole @pellared @dmathieu * @MrAlias @XSAM @dashpole @pellared @dmathieu @flc1125
CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu CODEOWNERS @MrAlias @pellared @dashpole @XSAM @dmathieu

View File

@ -54,8 +54,8 @@ go get -d go.opentelemetry.io/otel
(This may print some warning about "build constraints exclude all Go (This may print some warning about "build constraints exclude all Go
files", just ignore it.) files", just ignore it.)
This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`.
can alternatively use `git` directly with: Alternatively, you can use `git` directly with:
```sh ```sh
git clone https://github.com/open-telemetry/opentelemetry-go git clone https://github.com/open-telemetry/opentelemetry-go
@ -65,8 +65,7 @@ git clone https://github.com/open-telemetry/opentelemetry-go
that name is a kind of a redirector to GitHub that `go get` can that name is a kind of a redirector to GitHub that `go get` can
understand, but `git` does not.) understand, but `git` does not.)
This would put the project in the `opentelemetry-go` directory in This will add the project as `opentelemetry-go` within the current directory.
current working directory.
Enter the newly created directory and add your fork as a new remote: Enter the newly created directory and add your fork as a new remote:
@ -109,10 +108,9 @@ A PR is considered **ready to merge** when:
This is not enforced through automation, but needs to be validated by the This is not enforced through automation, but needs to be validated by the
maintainer merging. maintainer merging.
* The qualified approvals need to be from [Approver]s/[Maintainer]s * At least one of the qualified approvals needs to be from an
affiliated with different companies. Two qualified approvals from [Approver]/[Maintainer] affiliated with a different company than the author
[Approver]s or [Maintainer]s affiliated with the same company counts as a of the PR.
single qualified approval.
* PRs introducing changes that have already been discussed and consensus * PRs introducing changes that have already been discussed and consensus
reached only need one qualified approval. The discussion and resolution reached only need one qualified approval. The discussion and resolution
needs to be linked to the PR. needs to be linked to the PR.
@ -167,11 +165,11 @@ guidelines](https://opentelemetry.io/docs/specs/otel/library-guidelines).
### Focus on Capabilities, Not Structure Compliance ### Focus on Capabilities, Not Structure Compliance
OpenTelemetry is an evolving specification, one where the desires and OpenTelemetry is an evolving specification, one where the desires and
use cases are clear, but the method to satisfy those uses cases are use cases are clear, but the methods to satisfy those use cases are
not. not.
As such, Contributions should provide functionality and behavior that As such, Contributions should provide functionality and behavior that
conforms to the specification, but the interface and structure is conforms to the specification, but the interface and structure are
flexible. flexible.
It is preferable to have contributions follow the idioms of the It is preferable to have contributions follow the idioms of the
@ -193,6 +191,35 @@ should have `go test -bench` output in their description.
should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat) should have [`benchstat`](https://pkg.go.dev/golang.org/x/perf/cmd/benchstat)
output in their description. output in their description.
## Dependencies
This project uses [Go Modules] for dependency management. All modules will use
`go.mod` to explicitly list all direct and indirect dependencies, ensuring a
clear dependency graph. The `go.sum` file for each module will be committed to
the repository and used to verify the integrity of downloaded modules,
preventing malicious tampering.
This project uses automated dependency update tools (i.e. dependabot,
renovatebot) to manage updates to dependencies. This ensures that dependencies
are kept up-to-date with the latest security patches and features and are
reviewed before being merged. If you would like to propose a change to a
dependency it should be done through a pull request that updates the `go.mod`
file and includes a description of the change.
See the [versioning and compatibility](./VERSIONING.md) policy for more details
about dependency compatibility.
[Go Modules]: https://pkg.go.dev/cmd/go#hdr-Modules__module_versions__and_more
### Environment Dependencies
This project does not partition dependencies based on the environment (i.e.
`development`, `staging`, `production`).
Only the dependencies explicitly included in the released modules have been
tested and verified to work with the released code. No other guarantee is made
about the compatibility of other dependencies.
## Documentation ## Documentation
Each (non-internal, non-test) package must be documented using Each (non-internal, non-test) package must be documented using
@ -234,6 +261,10 @@ For a non-comprehensive but foundational overview of these best practices
the [Effective Go](https://golang.org/doc/effective_go.html) documentation the [Effective Go](https://golang.org/doc/effective_go.html) documentation
is an excellent starting place. is an excellent starting place.
We also recommend following the
[Go Code Review Comments](https://go.dev/wiki/CodeReviewComments)
that collects common comments made during reviews of Go code.
As a convenience for developers building this project the `make precommit` As a convenience for developers building this project the `make precommit`
will format, lint, validate, and in some cases fix the changes you plan to will format, lint, validate, and in some cases fix the changes you plan to
submit. This check will need to pass for your changes to be able to be submit. This check will need to pass for your changes to be able to be
@ -587,6 +618,10 @@ See also:
### Testing ### Testing
We allow using [`testify`](https://github.com/stretchr/testify) even though
it is seen as non-idiomatic according to
the [Go Test Comments](https://go.dev/wiki/TestComments#assert-libraries) page.
The tests should never leak goroutines. The tests should never leak goroutines.
Use the term `ConcurrentSafe` in the test name when it aims to verify the Use the term `ConcurrentSafe` in the test name when it aims to verify the
@ -599,8 +634,8 @@ is not in their root name.
The use of internal packages should be scoped to a single module. A sub-module The use of internal packages should be scoped to a single module. A sub-module
should never import from a parent internal package. This creates a coupling should never import from a parent internal package. This creates a coupling
between the two modules where a user can upgrade the parent without the child between the two modules where a user can upgrade the parent without the child,
and if the internal package API has changed it will fail to upgrade[^3]. and if the internal package API has changed, it will fail to upgrade[^3].
There are two known exceptions to this rule: There are two known exceptions to this rule:
@ -621,7 +656,7 @@ this.
### Ignoring context cancellation ### Ignoring context cancellation
OpenTelemetry API implementations need to ignore the cancellation of the context that are OpenTelemetry API implementations need to ignore the cancellation of the context that is
passed when recording a value (e.g. starting a span, recording a measurement, emitting a log). passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
Recording methods should not return an error describing the cancellation state of the context Recording methods should not return an error describing the cancellation state of the context
when they complete, nor should they abort any work. when they complete, nor should they abort any work.
@ -639,32 +674,478 @@ force flushing telemetry, shutting down a signal provider) the context cancellat
should be honored. This means all work done on behalf of the user provided context should be honored. This means all work done on behalf of the user provided context
should be canceled. should be canceled.
### Observability
OpenTelemetry Go SDK components should be instrumented to give users observability into the health and performance of the telemetry pipeline itself.
This allows operators to understand how well their observability infrastructure is functioning and to identify potential issues before they impact their applications.
This section outlines the best practices for building instrumentation in OpenTelemetry Go SDK components.
#### Environment Variable Activation
Observability features are currently experimental.
They should be disabled by default and activated through the `OTEL_GO_X_OBSERVABILITY` environment variable.
This follows the established experimental feature pattern used throughout the SDK.
Components should check for this environment variable using a consistent pattern:
```go
import "go.opentelemetry.io/otel/*/internal/x"
if x.Observability.Enabled() {
// Initialize observability metrics
}
```
**References**:
- [stdouttrace exporter](./exporters/stdout/stdouttrace/internal/x/x.go)
- [sdk](./sdk/internal/x/x.go)
#### Encapsulation
Instrumentation should be encapsulated within a dedicated `struct` (e.g. `instrumentation`).
It should not be mixed into the instrumented component.
Prefer this:
```go
type SDKComponent struct {
inst *instrumentation
}
type instrumentation struct {
inflight otelconv.SDKComponentInflight
exported otelconv.SDKComponentExported
}
```
To this:
```go
// ❌ Avoid this pattern.
type SDKComponent struct {
/* other SDKComponent fields... */
inflight otelconv.SDKComponentInflight
exported otelconv.SDKComponentExported
}
```
The instrumentation code should not bloat the code being instrumented.
This likely means placing it in its own file, or in its own package if it is complex or reused.
#### Initialization
Instrumentation setup should be explicit, side-effect free, and local to the relevant component.
Avoid relying on global or implicit [side effects][side-effect] for initialization.
Encapsulate setup in constructor functions, ensuring clear ownership and scope:
```go
import (
"errors"
semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
"go.opentelemetry.io/otel/semconv/v1.39.0/otelconv"
)
type SDKComponent struct {
inst *instrumentation
}
func NewSDKComponent(config Config) (*SDKComponent, error) {
inst, err := newInstrumentation()
if err != nil {
return nil, err
}
return &SDKComponent{inst: inst}, nil
}
type instrumentation struct {
inflight otelconv.SDKComponentInflight
exported otelconv.SDKComponentExported
}
func newInstrumentation() (*instrumentation, error) {
if !x.Observability.Enabled() {
return nil, nil
}
meter := otel.GetMeterProvider().Meter(
"<component-package-name>",
metric.WithInstrumentationVersion(sdk.Version()),
metric.WithSchemaURL(semconv.SchemaURL),
)
inst := &instrumentation{}
var err, e error
inst.inflight, e = otelconv.NewSDKComponentInflight(meter)
err = errors.Join(err, e)
inst.exported, e = otelconv.NewSDKComponentExported(meter)
err = errors.Join(err, e)
return inst, err
}
```
```go
// ❌ Avoid this pattern.
func (c *Component) initObservability() {
if !x.Observability.Enabled() {
return
}
// Initialize observability metrics
c.inst = &instrumentation{/* ... */}
}
```
[side-effect]: https://en.wikipedia.org/wiki/Side_effect_(computer_science)
#### Performance
When observability is disabled there should be little to no overhead.
```go
func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
if e.inst != nil {
attrs := expensiveOperation()
e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...)
}
// Export spans...
}
```
```go
// ❌ Avoid this pattern.
func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
attrs := expensiveOperation()
e.inst.recordSpanInflight(ctx, int64(len(spans)), attrs...)
// Export spans...
}
func (i *instrumentation) recordSpanInflight(ctx context.Context, count int64, attrs ...attribute.KeyValue) {
if i == nil || i.inflight == nil {
return
}
i.inflight.Add(ctx, count, metric.WithAttributes(attrs...))
}
```
When observability is enabled, the instrumentation code paths should be optimized to reduce allocation and computation overhead.
##### Attribute and Option Allocation Management
Pool attribute slices and options with [`sync.Pool`] to minimize allocations in measurement calls with dynamic attributes.
```go
var (
attrPool = sync.Pool{
New: func() any {
// Pre-allocate common capacity
knownCap := 8 // Adjust based on expected usage
s := make([]attribute.KeyValue, 0, knownCap)
// Return a pointer to avoid extra allocation on Put().
return &s
},
}
addOptPool = &sync.Pool{
New: func() any {
const n = 1 // WithAttributeSet
o := make([]metric.AddOption, 0, n)
// Return a pointer to avoid extra allocation on Put().
return &o
},
}
)
func (i *instrumentation) record(ctx context.Context, value int64, baseAttrs ...attribute.KeyValue) {
attrs := attrPool.Get().(*[]attribute.KeyValue)
defer func() {
*attrs = (*attrs)[:0] // Reset.
attrPool.Put(attrs)
}()
*attrs = append(*attrs, baseAttrs...)
// Add any dynamic attributes.
*attrs = append(*attrs, semconv.OTelComponentName("exporter-1"))
addOpt := addOptPool.Get().(*[]metric.AddOption)
defer func() {
*addOpt = (*addOpt)[:0]
addOptPool.Put(addOpt)
}()
set := attribute.NewSet(*attrs...)
*addOpt = append(*addOpt, metric.WithAttributeSet(set))
i.counter.Add(ctx, value, *addOpt...)
}
```
Pools are most effective when there are many pooled objects of the same sufficiently large size, and the objects are repeatedly used.
This amortizes the cost of allocation and synchronization.
Ideally, the pools should be scoped to be used as widely as possible within the component to maximize this efficiency while still ensuring correctness.
[`sync.Pool`]: https://pkg.go.dev/sync#Pool
##### Cache common attribute sets for repeated measurements
If a static set of attributes is used for measurements and it is known at compile time, pre-compute and cache these attributes.
```go
type spanLiveSetKey struct {
sampled bool
}
var spanLiveSetCache = map[spanLiveSetKey]attribute.Set{
{true}: attribute.NewSet(
otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
otelconv.SpanSamplingResultRecordAndSample,
),
),
{false}: attribute.NewSet(
otelconv.SDKSpanLive{}.AttrSpanSamplingResult(
otelconv.SpanSamplingResultRecordOnly,
),
),
}
func spanLiveSet(sampled bool) attribute.Set {
key := spanLiveSetKey{sampled: sampled}
return spanLiveSetCache[key]
}
```
##### Benchmarking
Always provide benchmarks when introducing or refactoring instrumentation.
Demonstrate the impact (allocs/op, B/op, ns/op) in enabled/disabled scenarios:
```go
func BenchmarkExportSpans(b *testing.B) {
scenarios := []struct {
name string
obsEnabled bool
}{
{"ObsDisabled", false},
{"ObsEnabled", true},
}
for _, scenario := range scenarios {
b.Run(scenario.name, func(b *testing.B) {
b.Setenv(
"OTEL_GO_X_OBSERVABILITY",
strconv.FormatBool(scenario.obsEnabled),
)
exporter := NewExporter()
spans := generateTestSpans(100)
b.ResetTimer()
b.ReportAllocs()
for i := 0; i < b.N; i++ {
_ = exporter.ExportSpans(context.Background(), spans)
}
})
}
}
```
#### Error Handling and Robustness
Errors should be reported back to the caller if possible, and partial failures should be handled as gracefully as possible.
```go
func newInstrumentation() (*instrumentation, error) {
if !x.Observability.Enabled() {
return nil, nil
}
m := otel.GetMeterProvider().Meter(/* initialize meter */)
counter, err := otelconv.NewSDKComponentCounter(m)
// Use the partially initialized counter if available.
i := &instrumentation{counter: counter}
// Return any error to the caller.
return i, err
}
```
```go
// ❌ Avoid this pattern.
func newInstrumentation() *instrumentation {
if !x.Observability.Enabled() {
return nil
}
m := otel.GetMeterProvider().Meter(/* initialize meter */)
counter, err := otelconv.NewSDKComponentCounter(m)
if err != nil {
// ❌ Do not dump the error to the OTel Handler. Return it to the
// caller.
otel.Handle(err)
// ❌ Do not return nil if we can still use the partially initialized
// counter.
return nil
}
return &instrumentation{counter: counter}
}
```
If the instrumented component cannot report the error to the user, report the error to `otel.Handle` instead.
#### Context Propagation
Ensure observability measurements receive the correct context, especially for trace exemplars and distributed context:
```go
func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
// Use the provided context for observability measurements
e.inst.recordSpanExportStarted(ctx, len(spans))
err := e.doExport(ctx, spans)
if err != nil {
e.inst.recordSpanExportFailed(ctx, len(spans), err)
} else {
e.inst.recordSpanExportSucceeded(ctx, len(spans))
}
return err
}
```
```go
// ❌ Avoid this pattern.
func (e *Exporter) ExportSpans(ctx context.Context, spans []trace.ReadOnlySpan) error {
// ❌ Do not break the context propagation.
e.inst.recordSpanExportStarted(context.Background(), len(spans))
err := e.doExport(ctx, spans)
/* ... */
return err
}
```
#### Semantic Conventions Compliance
All observability metrics should follow the [OpenTelemetry Semantic Conventions for SDK metrics](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/otel/sdk-metrics.md).
Use the metric semantic conventions convenience package [otelconv](./semconv/v1.39.0/otelconv/metric.go).
##### Component Identification
Component names and types should follow [semantic convention](https://github.com/open-telemetry/semantic-conventions/blob/1cf2476ae5e518225a766990a28a6d5602bd5a30/docs/registry/attributes/otel.md#otel-component-attributes).
If a component is not a well-known type specified in the semantic conventions, use the fully qualified package path and type name as a stable identifier.
```go
componentType := "go.opentelemetry.io/otel/sdk/trace.Span"
```
```go
// ❌ Do not do this.
componentType := "trace-span"
```
The component name should be a stable unique identifier for the specific instance of the component.
Use a global counter to ensure uniqueness if necessary.
```go
// Unique 0-based ID counter for component instances.
var componentIDCounter atomic.Int64
// nextID returns the next unique ID for a component.
func nextID() int64 {
return componentIDCounter.Add(1) - 1
}
// componentName returns a unique name for the component instance.
func componentName() attribute.KeyValue {
id := nextID()
name := fmt.Sprintf("%s/%d", componentType, id)
return semconv.OTelComponentName(name)
}
```
The component ID will need to be resettable for deterministic testing.
If tests are in a different package than the component being tested (i.e. a `<component package>_test` package name), use a generated `counter` internal package to manage the counter.
See [stdouttrace exporter example](./exporters/stdout/stdouttrace/internal/gen.go) for reference.
#### Testing
Use deterministic testing with isolated state:
```go
func TestObservability(t *testing.T) {
// Restore state after test to ensure this does not affect other tests.
prev := otel.GetMeterProvider()
t.Cleanup(func() { otel.SetMeterProvider(prev) })
// Isolate the meter provider for deterministic testing
reader := metric.NewManualReader()
meterProvider := metric.NewMeterProvider(metric.WithReader(reader))
otel.SetMeterProvider(meterProvider)
// Use t.Setenv to ensure environment variable is restored after test.
t.Setenv("OTEL_GO_X_OBSERVABILITY", "true")
// Reset component ID counter to ensure deterministic component names.
componentIDCounter.Store(0)
/* ... test code ... */
}
```
Test order should not affect results.
Ensure that any global state (e.g. component ID counters) is reset between tests.
## Approvers and Maintainers ## Approvers and Maintainers
### Triagers
- [Cheng-Zhen Yang](https://github.com/scorpionknifes), Independent
### Approvers
### Maintainers ### Maintainers
- [Damien Mathieu](https://github.com/dmathieu), Elastic - [Damien Mathieu](https://github.com/dmathieu), Elastic ([GPG](https://keys.openpgp.org/search?q=5A126B972A81A6CE443E5E1B408B8E44F0873832))
- [David Ashpole](https://github.com/dashpole), Google - [David Ashpole](https://github.com/dashpole), Google ([GPG](https://keys.openpgp.org/search?q=C0D1BDDCAAEAE573673085F176327DA4D864DC70))
- [Robert Pająk](https://github.com/pellared), Splunk - [Robert Pająk](https://github.com/pellared), Splunk ([GPG](https://keys.openpgp.org/search?q=CDAD3A60476A3DE599AA5092E5F7C35A4DBE90C2))
- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Sam Xie](https://github.com/XSAM), Splunk ([GPG](https://keys.openpgp.org/search?q=AEA033782371ABB18EE39188B8044925D6FEEBEA))
- [Tyler Yahn](https://github.com/MrAlias), Splunk - [Tyler Yahn](https://github.com/MrAlias), Splunk ([GPG](https://keys.openpgp.org/search?q=0x46B0F3E1A8B1BA5A))
For more information about the maintainer role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#maintainer).
### Approvers
- [Flc](https://github.com/flc1125), Independent
For more information about the approver role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#approver).
### Triagers
- [Alex Kats](https://github.com/akats7), Capital One
For more information about the triager role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#triager).
### Emeritus ### Emeritus
- [Aaron Clawson](https://github.com/MadVikingGod) - [Aaron Clawson](https://github.com/MadVikingGod)
- [Anthony Mirabella](https://github.com/Aneurysm9) - [Anthony Mirabella](https://github.com/Aneurysm9)
- [Cheng-Zhen Yang](https://github.com/scorpionknifes)
- [Chester Cheung](https://github.com/hanyuancheung) - [Chester Cheung](https://github.com/hanyuancheung)
- [Evan Torrie](https://github.com/evantorrie) - [Evan Torrie](https://github.com/evantorrie)
- [Gustavo Silva Paiva](https://github.com/paivagustavo) - [Gustavo Silva Paiva](https://github.com/paivagustavo)
- [Josh MacDonald](https://github.com/jmacd) - [Josh MacDonald](https://github.com/jmacd)
- [Liz Fong-Jones](https://github.com/lizthegrey) - [Liz Fong-Jones](https://github.com/lizthegrey)
For more information about the emeritus role, see the [community repository](https://github.com/open-telemetry/community/blob/main/guides/contributor/membership.md#emeritus-maintainerapprovertriager).
### Become an Approver or a Maintainer ### Become an Approver or a Maintainer
See the [community membership document in OpenTelemetry community See the [community membership document in OpenTelemetry community

View File

@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
--------------------------------------------------------------------------------
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -34,17 +34,17 @@ $(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS)
MULTIMOD = $(TOOLS)/multimod MULTIMOD = $(TOOLS)/multimod
$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod $(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod
SEMCONVGEN = $(TOOLS)/semconvgen
$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen
CROSSLINK = $(TOOLS)/crosslink CROSSLINK = $(TOOLS)/crosslink
$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink $(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/build-tools/crosslink
SEMCONVKIT = $(TOOLS)/semconvkit SEMCONVKIT = $(TOOLS)/semconvkit
$(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit $(TOOLS)/semconvkit: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/semconvkit
VERIFYREADMES = $(TOOLS)/verifyreadmes
$(TOOLS)/verifyreadmes: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/verifyreadmes
GOLANGCI_LINT = $(TOOLS)/golangci-lint GOLANGCI_LINT = $(TOOLS)/golangci-lint
$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint $(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/v2/cmd/golangci-lint
MISSPELL = $(TOOLS)/misspell MISSPELL = $(TOOLS)/misspell
$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell $(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell
@ -68,7 +68,7 @@ GOVULNCHECK = $(TOOLS)/govulncheck
$(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck $(TOOLS)/govulncheck: PACKAGE=golang.org/x/vuln/cmd/govulncheck
.PHONY: tools .PHONY: tools
tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(SEMCONVGEN) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE) tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(VERIFYREADMES) $(MULTIMOD) $(SEMCONVKIT) $(GOTMPL) $(GORELEASE)
# Virtualized python tools via docker # Virtualized python tools via docker
@ -146,11 +146,12 @@ build-tests/%:
# Tests # Tests
TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe TEST_TARGETS := test-default test-bench test-short test-verbose test-race test-concurrent-safe test-fuzz
.PHONY: $(TEST_TARGETS) test .PHONY: $(TEST_TARGETS) test
test-default test-race: ARGS=-race test-default test-race: ARGS=-race
test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=.
test-short: ARGS=-short test-short: ARGS=-short
test-fuzz: ARGS=-fuzztime=10s -fuzz
test-verbose: ARGS=-v -race test-verbose: ARGS=-v -race
test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race test-concurrent-safe: ARGS=-run=ConcurrentSafe -count=100 -race
test-concurrent-safe: TIMEOUT=120 test-concurrent-safe: TIMEOUT=120
@ -213,11 +214,8 @@ go-mod-tidy/%: crosslink
&& cd $(DIR) \ && cd $(DIR) \
&& $(GO) mod tidy -compat=1.21 && $(GO) mod tidy -compat=1.21
.PHONY: lint-modules
lint-modules: go-mod-tidy
.PHONY: lint .PHONY: lint
lint: misspell lint-modules golangci-lint govulncheck lint: misspell go-mod-tidy golangci-lint govulncheck
.PHONY: vanity-import-check .PHONY: vanity-import-check
vanity-import-check: $(PORTO) vanity-import-check: $(PORTO)
@ -284,7 +282,7 @@ semconv-generate: $(SEMCONVKIT)
docker run --rm \ docker run --rm \
-u $(DOCKER_USER) \ -u $(DOCKER_USER) \
--env HOME=/tmp/weaver \ --env HOME=/tmp/weaver \
--mount 'type=bind,source=$(PWD)/semconv,target=/home/weaver/templates/registry/go,readonly' \ --mount 'type=bind,source=$(PWD)/semconv/templates,target=/home/weaver/templates,readonly' \
--mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \ --mount 'type=bind,source=$(PWD)/semconv/${TAG},target=/home/weaver/target' \
--mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \ --mount 'type=bind,source=$(HOME)/.weaver,target=/tmp/weaver/.weaver' \
$(WEAVER_IMAGE) registry generate \ $(WEAVER_IMAGE) registry generate \
@ -293,7 +291,7 @@ semconv-generate: $(SEMCONVKIT)
--param tag=$(TAG) \ --param tag=$(TAG) \
go \ go \
/home/weaver/target /home/weaver/target
$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" $(SEMCONVKIT) -semconv "$(SEMCONVPKG)" -tag "$(TAG)"
.PHONY: gorelease .PHONY: gorelease
gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%) gorelease: $(OTEL_GO_MOD_DIRS:%=gorelease/%)
@ -319,10 +317,11 @@ add-tags: verify-mods
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
$(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
MARKDOWNIMAGE := $(shell awk '$$4=="markdown" {print $$2}' $(DEPENDENCIES_DOCKERFILE))
.PHONY: lint-markdown .PHONY: lint-markdown
lint-markdown: lint-markdown:
docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md docker run --rm -u $(DOCKER_USER) -v "$(CURDIR):$(WORKDIR)" $(MARKDOWNIMAGE) -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
.PHONY: verify-readmes .PHONY: verify-readmes
verify-readmes: verify-readmes: $(VERIFYREADMES)
./verify_readmes.sh $(VERIFYREADMES)

View File

@ -6,6 +6,8 @@
[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) [![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel)
[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go) [![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/open-telemetry/opentelemetry-go/badge)](https://scorecard.dev/viewer/?uri=github.com/open-telemetry/opentelemetry-go)
[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996) [![OpenSSF Best Practices](https://www.bestpractices.dev/projects/9996/badge)](https://www.bestpractices.dev/projects/9996)
[![Fuzzing Status](https://oss-fuzz-build-logs.storage.googleapis.com/badges/opentelemetry-go.svg)](https://issues.oss-fuzz.com/issues?q=project:opentelemetry-go)
[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go.svg?type=shield&issueType=license)](https://app.fossa.com/projects/custom%2B162%2Fgithub.com%2Fopen-telemetry%2Fopentelemetry-go?ref=badge_shield&issueType=license)
[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) [![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT)
OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/).
@ -51,27 +53,20 @@ Currently, this project supports the following environments.
| OS | Go Version | Architecture | | OS | Go Version | Architecture |
|----------|------------|--------------| |----------|------------|--------------|
| Ubuntu | 1.25 | amd64 |
| Ubuntu | 1.24 | amd64 | | Ubuntu | 1.24 | amd64 |
| Ubuntu | 1.23 | amd64 | | Ubuntu | 1.25 | 386 |
| Ubuntu | 1.22 | amd64 |
| Ubuntu | 1.24 | 386 | | Ubuntu | 1.24 | 386 |
| Ubuntu | 1.23 | 386 | | Ubuntu | 1.25 | arm64 |
| Ubuntu | 1.22 | 386 |
| Ubuntu | 1.24 | arm64 | | Ubuntu | 1.24 | arm64 |
| Ubuntu | 1.23 | arm64 | | macOS | 1.25 | amd64 |
| Ubuntu | 1.22 | arm64 | | macOS | 1.24 | amd64 |
| macOS 13 | 1.24 | amd64 | | macOS | 1.25 | arm64 |
| macOS 13 | 1.23 | amd64 |
| macOS 13 | 1.22 | amd64 |
| macOS | 1.24 | arm64 | | macOS | 1.24 | arm64 |
| macOS | 1.23 | arm64 | | Windows | 1.25 | amd64 |
| macOS | 1.22 | arm64 |
| Windows | 1.24 | amd64 | | Windows | 1.24 | amd64 |
| Windows | 1.23 | amd64 | | Windows | 1.25 | 386 |
| Windows | 1.22 | amd64 |
| Windows | 1.24 | 386 | | Windows | 1.24 | 386 |
| Windows | 1.23 | 386 |
| Windows | 1.22 | 386 |
While this project should work for other systems, no compatibility guarantees While this project should work for other systems, no compatibility guarantees
are made for those systems currently. are made for those systems currently.

View File

@ -1,5 +1,9 @@
# Release Process # Release Process
## Create a `Version Release` issue
Create a `Version Release` issue to track the release process.
## Semantic Convention Generation ## Semantic Convention Generation
New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated. New versions of the [OpenTelemetry Semantic Conventions] mean new versions of the `semconv` package need to be generated.
@ -20,7 +24,7 @@ Ensure things look correct before submitting a pull request to include the addit
## Breaking changes validation ## Breaking changes validation
You can run `make gorelease` that runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes done in the public API. You can run `make gorelease` which runs [gorelease](https://pkg.go.dev/golang.org/x/exp/cmd/gorelease) to ensure that there are no unwanted changes made in the public API.
You can check/report problems with `gorelease` [here](https://golang.org/issues/26420). You can check/report problems with `gorelease` [here](https://golang.org/issues/26420).
@ -58,7 +62,7 @@ Update go.mod for submodules to depend on the new release which will happen in t
``` ```
3. Update the [Changelog](./CHANGELOG.md). 3. Update the [Changelog](./CHANGELOG.md).
- Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand. - Make sure all relevant changes for this release are included and are written in language that non-contributors to the project can understand.
To verify this, you can look directly at the commits since the `<last tag>`. To verify this, you can look directly at the commits since the `<last tag>`.
``` ```
@ -103,11 +107,50 @@ It is critical you make sure the version you push upstream is correct.
... ...
``` ```
## Sign artifacts
To ensure we comply with CNCF best practices, we need to sign the release artifacts.
Download the `.tar.gz` and `.zip` archives from the [tags page](https://github.com/open-telemetry/opentelemetry-go/tags) for the new release tag.
Both archives need to be signed with your GPG key.
You can use [this script] to verify the contents of the archives before signing them.
To find your GPG key ID, run:
```terminal
gpg --list-secret-keys --keyid-format=long
```
The key ID is the 16-character string after `sec rsa4096/` (or similar).
Set environment variables and sign both artifacts:
```terminal
export VERSION="<version>" # e.g., v1.32.0
export KEY_ID="<your-gpg-key-id>"
gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.tar.gz
gpg --local-user $KEY_ID --armor --detach-sign opentelemetry-go-$VERSION.zip
```
You can verify the signatures with:
```terminal
gpg --verify opentelemetry-go-$VERSION.tar.gz.asc opentelemetry-go-$VERSION.tar.gz
gpg --verify opentelemetry-go-$VERSION.zip.asc opentelemetry-go-$VERSION.zip
```
[this script]: https://github.com/MrAlias/attest-sh
## Release ## Release
Finally create a Release for the new `<new tag>` on GitHub. Finally create a Release for the new `<new tag>` on GitHub.
The release body should include all the release notes from the Changelog for this release. The release body should include all the release notes from the Changelog for this release.
***IMPORTANT***: GitHub Releases are immutable once created.
You must upload the signed artifacts (`.tar.gz`, `.tar.gz.asc`, `.zip`, and `.zip.asc`) when creating the release, as they cannot be added or modified later.
## Post-Release ## Post-Release
### Contrib Repository ### Contrib Repository
@ -123,10 +166,16 @@ Importantly, bump any package versions referenced to be the latest one you just
[Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/ [Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/
[content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go [content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go
### Demo Repository ### Close the milestone
Bump the dependencies in the following Go services: Once a release is made, ensure all issues that were fixed and PRs that were merged as part of this release are added to the corresponding milestone.
This helps track what changes were included in each release.
- [`accounting`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/accounting) - To find issues that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/issues?q=is%3Aissue%20no%3Amilestone%20is%3Aclosed%20sort%3Aupdated-desc%20reason%3Acompleted%20-label%3AStale%20linked%3Apr)
- [`checkoutservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/checkout) - To find merged PRs that haven't been included in a milestone, use this [GitHub search query](https://github.com/open-telemetry/opentelemetry-go/pulls?q=is%3Apr+no%3Amilestone+is%3Amerged).
- [`productcatalogservice`](https://github.com/open-telemetry/opentelemetry-demo/tree/main/src/product-catalog)
Once all related issues and PRs have been added to the milestone, close the milestone.
### Close the `Version Release` issue
Once the todo list in the `Version Release` issue is complete, close the issue.

203
vendor/go.opentelemetry.io/otel/SECURITY-INSIGHTS.yml generated vendored Normal file
View File

@ -0,0 +1,203 @@
header:
schema-version: "1.0.0"
expiration-date: "2026-08-04T00:00:00.000Z"
last-updated: "2025-08-04"
last-reviewed: "2025-08-04"
commit-hash: 69e81088ad40f45a0764597326722dea8f3f00a8
project-url: https://github.com/open-telemetry/opentelemetry-go
project-release: "v1.37.0"
changelog: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CHANGELOG.md
license: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/LICENSE
project-lifecycle:
status: active
bug-fixes-only: false
core-maintainers:
- https://github.com/dmathieu
- https://github.com/dashpole
- https://github.com/pellared
- https://github.com/XSAM
- https://github.com/MrAlias
release-process: |
See https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/RELEASING.md
contribution-policy:
accepts-pull-requests: true
accepts-automated-pull-requests: true
automated-tools-list:
- automated-tool: dependabot
action: allowed
comment: Automated dependency updates are accepted.
- automated-tool: renovatebot
action: allowed
comment: Automated dependency updates are accepted.
- automated-tool: opentelemetrybot
action: allowed
comment: Automated OpenTelemetry actions are accepted.
contributing-policy: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md
code-of-conduct: https://github.com/open-telemetry/.github/blob/ffa15f76b65ec7bcc41f6a0b277edbb74f832206/CODE_OF_CONDUCT.md
documentation:
- https://pkg.go.dev/go.opentelemetry.io/otel
- https://opentelemetry.io/docs/instrumentation/go/
distribution-points:
- pkg:golang/go.opentelemetry.io/otel
- pkg:golang/go.opentelemetry.io/otel/bridge/opencensus
- pkg:golang/go.opentelemetry.io/otel/bridge/opencensus/test
- pkg:golang/go.opentelemetry.io/otel/bridge/opentracing
- pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc
- pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp
- pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace
- pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc
- pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp
- pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutmetric
- pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdouttrace
- pkg:golang/go.opentelemetry.io/otel/exporters/zipkin
- pkg:golang/go.opentelemetry.io/otel/metric
- pkg:golang/go.opentelemetry.io/otel/sdk
- pkg:golang/go.opentelemetry.io/otel/sdk/metric
- pkg:golang/go.opentelemetry.io/otel/trace
- pkg:golang/go.opentelemetry.io/otel/exporters/prometheus
- pkg:golang/go.opentelemetry.io/otel/log
- pkg:golang/go.opentelemetry.io/otel/log/logtest
- pkg:golang/go.opentelemetry.io/otel/sdk/log
- pkg:golang/go.opentelemetry.io/otel/sdk/log/logtest
- pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploggrpc
- pkg:golang/go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp
- pkg:golang/go.opentelemetry.io/otel/exporters/stdout/stdoutlog
- pkg:golang/go.opentelemetry.io/otel/schema
security-artifacts:
threat-model:
threat-model-created: false
comment: |
No formal threat model created yet.
self-assessment:
self-assessment-created: false
comment: |
No formal self-assessment yet.
security-testing:
- tool-type: sca
tool-name: Dependabot
tool-version: latest
tool-url: https://github.com/dependabot
tool-rulesets:
- built-in
integration:
ad-hoc: false
ci: true
before-release: true
comment: |
Automated dependency updates.
- tool-type: sast
tool-name: golangci-lint
tool-version: latest
tool-url: https://github.com/golangci/golangci-lint
tool-rulesets:
- built-in
integration:
ad-hoc: false
ci: true
before-release: true
comment: |
Static analysis in CI.
- tool-type: fuzzing
tool-name: OSS-Fuzz
tool-version: latest
tool-url: https://github.com/google/oss-fuzz
tool-rulesets:
- default
integration:
ad-hoc: false
ci: false
before-release: false
comment: |
OpenTelemetry Go is integrated with OSS-Fuzz for continuous fuzz testing. See https://github.com/google/oss-fuzz/tree/f0f9b221190c6063a773bea606d192ebfc3d00cf/projects/opentelemetry-go for more details.
- tool-type: sast
tool-name: CodeQL
tool-version: latest
tool-url: https://github.com/github/codeql
tool-rulesets:
- default
integration:
ad-hoc: false
ci: true
before-release: true
comment: |
CodeQL static analysis is run in CI for all commits and pull requests to detect security vulnerabilities in the Go source code. See https://github.com/open-telemetry/opentelemetry-go/blob/d5b5b059849720144a03ca5c87561bfbdb940119/.github/workflows/codeql-analysis.yml for workflow details.
- tool-type: sca
tool-name: govulncheck
tool-version: latest
tool-url: https://pkg.go.dev/golang.org/x/vuln/cmd/govulncheck
tool-rulesets:
- default
integration:
ad-hoc: false
ci: true
before-release: true
comment: |
govulncheck is run in CI to detect known vulnerabilities in Go modules and code paths. See https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/.github/workflows/ci.yml for workflow configuration.
security-assessments:
- auditor-name: 7ASecurity
auditor-url: https://7asecurity.com
auditor-report: https://7asecurity.com/reports/pentest-report-opentelemetry.pdf
report-year: 2023
comment: |
This independent penetration test by 7ASecurity covered OpenTelemetry repositories including opentelemetry-go. The assessment focused on codebase review, threat modeling, and vulnerability identification. See the report for details of findings and recommendations applicable to opentelemetry-go. No critical vulnerabilities were found for this repository.
security-contacts:
- type: email
value: cncf-opentelemetry-security@lists.cncf.io
primary: true
- type: website
value: https://github.com/open-telemetry/opentelemetry-go/security/policy
primary: false
vulnerability-reporting:
accepts-vulnerability-reports: true
email-contact: cncf-opentelemetry-security@lists.cncf.io
security-policy: https://github.com/open-telemetry/opentelemetry-go/security/policy
comment: |
Security issues should be reported via email or GitHub security policy page.
dependencies:
third-party-packages: true
dependencies-lists:
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opencensus/test/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/bridge/opentracing/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploggrpc/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlplog/otlploghttp/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetricgrpc/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlpmetric/otlpmetrichttp/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracegrpc/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/otlp/otlptrace/otlptracehttp/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/prometheus/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutlog/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdoutmetric/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/stdout/stdouttrace/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/exporters/zipkin/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/internal/tools/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/log/logtest/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/metric/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/schema/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/log/logtest/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/sdk/metric/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/go.mod
- https://github.com/open-telemetry/opentelemetry-go/blob/v1.37.0/trace/internal/telemetry/test/go.mod
dependencies-lifecycle:
policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md
comment: |
Dependency lifecycle managed via go.mod and renovatebot.
env-dependencies-policy:
policy-url: https://github.com/open-telemetry/opentelemetry-go/blob/69e81088ad40f45a0764597326722dea8f3f00a8/CONTRIBUTING.md
comment: |
See contributing policy for environment usage.

View File

@ -83,7 +83,7 @@ is designed so the following goals can be achieved.
in either the module path or the import path. in either the module path or the import path.
* In addition to public APIs, telemetry produced by stable instrumentation * In addition to public APIs, telemetry produced by stable instrumentation
will remain stable and backwards compatible. This is to avoid breaking will remain stable and backwards compatible. This is to avoid breaking
alerts and dashboard. alerts and dashboards.
* Modules will be used to encapsulate instrumentation, detectors, exporters, * Modules will be used to encapsulate instrumentation, detectors, exporters,
propagators, and any other independent sets of related components. propagators, and any other independent sets of related components.
* Experimental modules still under active development will be versioned at * Experimental modules still under active development will be versioned at

View File

@ -16,7 +16,7 @@ type (
// set into a wire representation. // set into a wire representation.
Encoder interface { Encoder interface {
// Encode returns the serialized encoding of the attribute set using // Encode returns the serialized encoding of the attribute set using
// its Iterator. This result may be cached by a attribute.Set. // its Iterator. This result may be cached by an attribute.Set.
Encode(iterator Iterator) string Encode(iterator Iterator) string
// ID returns a value that is unique for each class of attribute // ID returns a value that is unique for each class of attribute
@ -78,7 +78,7 @@ func DefaultEncoder() Encoder {
defaultEncoderOnce.Do(func() { defaultEncoderOnce.Do(func() {
defaultEncoderInstance = &defaultAttrEncoder{ defaultEncoderInstance = &defaultAttrEncoder{
pool: sync.Pool{ pool: sync.Pool{
New: func() interface{} { New: func() any {
return &bytes.Buffer{} return &bytes.Buffer{}
}, },
}, },
@ -96,11 +96,11 @@ func (d *defaultAttrEncoder) Encode(iter Iterator) string {
for iter.Next() { for iter.Next() {
i, keyValue := iter.IndexedAttribute() i, keyValue := iter.IndexedAttribute()
if i > 0 { if i > 0 {
_, _ = buf.WriteRune(',') _ = buf.WriteByte(',')
} }
copyAndEscape(buf, string(keyValue.Key)) copyAndEscape(buf, string(keyValue.Key))
_, _ = buf.WriteRune('=') _ = buf.WriteByte('=')
if keyValue.Value.Type() == STRING { if keyValue.Value.Type() == STRING {
copyAndEscape(buf, keyValue.Value.AsString()) copyAndEscape(buf, keyValue.Value.AsString())
@ -122,14 +122,14 @@ func copyAndEscape(buf *bytes.Buffer, val string) {
for _, ch := range val { for _, ch := range val {
switch ch { switch ch {
case '=', ',', escapeChar: case '=', ',', escapeChar:
_, _ = buf.WriteRune(escapeChar) _ = buf.WriteByte(escapeChar)
} }
_, _ = buf.WriteRune(ch) _, _ = buf.WriteRune(ch)
} }
} }
// Valid returns true if this encoder ID was allocated by // Valid reports whether this encoder ID was allocated by
// `NewEncoderID`. Invalid encoder IDs will not be cached. // [NewEncoderID]. Invalid encoder IDs will not be cached.
func (id EncoderID) Valid() bool { func (id EncoderID) Valid() bool {
return id.value != 0 return id.value != 0
} }

View File

@ -15,11 +15,11 @@ type Filter func(KeyValue) bool
// //
// If keys is empty a deny-all filter is returned. // If keys is empty a deny-all filter is returned.
func NewAllowKeysFilter(keys ...Key) Filter { func NewAllowKeysFilter(keys ...Key) Filter {
if len(keys) <= 0 { if len(keys) == 0 {
return func(kv KeyValue) bool { return false } return func(KeyValue) bool { return false }
} }
allowed := make(map[Key]struct{}) allowed := make(map[Key]struct{}, len(keys))
for _, k := range keys { for _, k := range keys {
allowed[k] = struct{}{} allowed[k] = struct{}{}
} }
@ -34,11 +34,11 @@ func NewAllowKeysFilter(keys ...Key) Filter {
// //
// If keys is empty an allow-all filter is returned. // If keys is empty an allow-all filter is returned.
func NewDenyKeysFilter(keys ...Key) Filter { func NewDenyKeysFilter(keys ...Key) Filter {
if len(keys) <= 0 { if len(keys) == 0 {
return func(kv KeyValue) bool { return true } return func(KeyValue) bool { return true }
} }
forbid := make(map[Key]struct{}) forbid := make(map[Key]struct{}, len(keys))
for _, k := range keys { for _, k := range keys {
forbid[k] = struct{}{} forbid[k] = struct{}{}
} }

92
vendor/go.opentelemetry.io/otel/attribute/hash.go generated vendored Normal file
View File

@ -0,0 +1,92 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
import (
"fmt"
"reflect"
"go.opentelemetry.io/otel/attribute/internal/xxhash"
)
// Type identifiers. Each identifier is hashed before the value of the
// corresponding type. This is done to distinguish values that are hashed
// with the same raw representation (e.g. int64(1) and true, or []int64{0}
// and int64(0)).
//
// These are all 8-byte strings converted to a uint64 representation. A
// uint64 is used instead of the string directly as an optimization: it
// avoids the per-byte loop in [xxhash], which adds minor overhead.
const (
	boolID         uint64 = 7953749933313450591 // "_boolean" (little endian)
	int64ID        uint64 = 7592915492740740150 // "64_bit_i" (little endian)
	float64ID      uint64 = 7376742710626956342 // "64_bit_f" (little endian)
	stringID       uint64 = 6874584755375207263 // "_string_" (little endian)
	boolSliceID    uint64 = 6875993255270243167 // "_[]bool_" (little endian)
	int64SliceID   uint64 = 3762322556277578591 // "_[]int64" (little endian)
	float64SliceID uint64 = 7308324551835016539 // "[]double" (little endian)
	stringSliceID  uint64 = 7453010373645655387 // "[]string" (little endian)
)
// hashKVs computes the xxHash64 checksum of kvs by folding each key-value
// pair, in order, into a freshly initialized hasher.
func hashKVs(kvs []KeyValue) uint64 {
	hasher := xxhash.New()
	for i := range kvs {
		hasher = hashKV(hasher, kvs[i])
	}
	return hasher.Sum64()
}
// hashKV returns the xxHash64 hash of kv with h as the base.
//
// The key is mixed in first, then a type-identifier constant, then the value
// itself. The type tag distinguishes values whose raw representations would
// otherwise collide (e.g. int64(1) and true). The order of these writes is
// part of the hash's identity and must not change.
func hashKV(h xxhash.Hash, kv KeyValue) xxhash.Hash {
	h = h.String(string(kv.Key))
	switch kv.Value.Type() {
	case BOOL:
		h = h.Uint64(boolID)
		h = h.Uint64(kv.Value.numeric)
	case INT64:
		h = h.Uint64(int64ID)
		h = h.Uint64(kv.Value.numeric)
	case FLOAT64:
		h = h.Uint64(float64ID)
		// Assumes numeric stored with math.Float64bits.
		h = h.Uint64(kv.Value.numeric)
	case STRING:
		h = h.Uint64(stringID)
		h = h.String(kv.Value.stringly)
	case BOOLSLICE:
		// Slice values are stored as arrays behind an interface; reflect is
		// used to iterate the elements without knowing the array length.
		h = h.Uint64(boolSliceID)
		rv := reflect.ValueOf(kv.Value.slice)
		for i := 0; i < rv.Len(); i++ {
			h = h.Bool(rv.Index(i).Bool())
		}
	case INT64SLICE:
		h = h.Uint64(int64SliceID)
		rv := reflect.ValueOf(kv.Value.slice)
		for i := 0; i < rv.Len(); i++ {
			h = h.Int64(rv.Index(i).Int())
		}
	case FLOAT64SLICE:
		h = h.Uint64(float64SliceID)
		rv := reflect.ValueOf(kv.Value.slice)
		for i := 0; i < rv.Len(); i++ {
			h = h.Float64(rv.Index(i).Float())
		}
	case STRINGSLICE:
		h = h.Uint64(stringSliceID)
		rv := reflect.ValueOf(kv.Value.slice)
		for i := 0; i < rv.Len(); i++ {
			h = h.String(rv.Index(i).String())
		}
	case INVALID:
		// An invalid Value contributes only its key to the hash.
	default:
		// Logging is an alternative, but using the internal logger here
		// causes an import cycle so it is not done.
		v := kv.Value.AsInterface()
		msg := fmt.Sprintf("unknown value type: %[1]v (%[1]T)", v)
		panic(msg)
	}
	return h
}

View File

@ -5,46 +5,42 @@
Package attribute provide several helper functions for some commonly used Package attribute provide several helper functions for some commonly used
logic of processing attributes. logic of processing attributes.
*/ */
package attribute // import "go.opentelemetry.io/otel/internal/attribute" package attribute // import "go.opentelemetry.io/otel/attribute/internal"
import ( import (
"reflect" "reflect"
) )
// BoolSliceValue converts a bool slice into an array with same elements as slice. // BoolSliceValue converts a bool slice into an array with same elements as slice.
func BoolSliceValue(v []bool) interface{} { func BoolSliceValue(v []bool) any {
var zero bool cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[bool]())).Elem()
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v)) reflect.Copy(cp, reflect.ValueOf(v))
return cp.Interface() return cp.Interface()
} }
// Int64SliceValue converts an int64 slice into an array with same elements as slice. // Int64SliceValue converts an int64 slice into an array with same elements as slice.
func Int64SliceValue(v []int64) interface{} { func Int64SliceValue(v []int64) any {
var zero int64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[int64]())).Elem()
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v)) reflect.Copy(cp, reflect.ValueOf(v))
return cp.Interface() return cp.Interface()
} }
// Float64SliceValue converts a float64 slice into an array with same elements as slice. // Float64SliceValue converts a float64 slice into an array with same elements as slice.
func Float64SliceValue(v []float64) interface{} { func Float64SliceValue(v []float64) any {
var zero float64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[float64]())).Elem()
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v)) reflect.Copy(cp, reflect.ValueOf(v))
return cp.Interface() return cp.Interface()
} }
// StringSliceValue converts a string slice into an array with same elements as slice. // StringSliceValue converts a string slice into an array with same elements as slice.
func StringSliceValue(v []string) interface{} { func StringSliceValue(v []string) any {
var zero string cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[string]())).Elem()
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(zero))).Elem()
reflect.Copy(cp, reflect.ValueOf(v)) reflect.Copy(cp, reflect.ValueOf(v))
return cp.Interface() return cp.Interface()
} }
// AsBoolSlice converts a bool array into a slice into with same elements as array. // AsBoolSlice converts a bool array into a slice into with same elements as array.
func AsBoolSlice(v interface{}) []bool { func AsBoolSlice(v any) []bool {
rv := reflect.ValueOf(v) rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array { if rv.Type().Kind() != reflect.Array {
return nil return nil
@ -57,7 +53,7 @@ func AsBoolSlice(v interface{}) []bool {
} }
// AsInt64Slice converts an int64 array into a slice into with same elements as array. // AsInt64Slice converts an int64 array into a slice into with same elements as array.
func AsInt64Slice(v interface{}) []int64 { func AsInt64Slice(v any) []int64 {
rv := reflect.ValueOf(v) rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array { if rv.Type().Kind() != reflect.Array {
return nil return nil
@ -70,7 +66,7 @@ func AsInt64Slice(v interface{}) []int64 {
} }
// AsFloat64Slice converts a float64 array into a slice into with same elements as array. // AsFloat64Slice converts a float64 array into a slice into with same elements as array.
func AsFloat64Slice(v interface{}) []float64 { func AsFloat64Slice(v any) []float64 {
rv := reflect.ValueOf(v) rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array { if rv.Type().Kind() != reflect.Array {
return nil return nil
@ -83,7 +79,7 @@ func AsFloat64Slice(v interface{}) []float64 {
} }
// AsStringSlice converts a string array into a slice into with same elements as array. // AsStringSlice converts a string array into a slice into with same elements as array.
func AsStringSlice(v interface{}) []string { func AsStringSlice(v any) []string {
rv := reflect.ValueOf(v) rv := reflect.ValueOf(v)
if rv.Type().Kind() != reflect.Array { if rv.Type().Kind() != reflect.Array {
return nil return nil

View File

@ -0,0 +1,64 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package xxhash provides a wrapper around the xxhash library for attribute hashing.
package xxhash // import "go.opentelemetry.io/otel/attribute/internal/xxhash"
import (
"encoding/binary"
"math"
"github.com/cespare/xxhash/v2"
)
// Hash wraps xxhash.Digest to provide an API friendly for hashing attribute
// values.
//
// Methods return the receiver so calls can be chained; the wrapped Digest is
// a shared pointer, so every call advances the same running hash.
type Hash struct {
	d *xxhash.Digest
}
// New returns a new initialized xxHash64 hasher.
func New() Hash {
	return Hash{d: xxhash.New()}
}
// Uint64 mixes val into the hash as 8 little-endian bytes and returns the
// updated hash.
func (h Hash) Uint64(val uint64) Hash {
	var b [8]byte
	binary.LittleEndian.PutUint64(b[:], val)
	// xxhash's Write never returns a non-nil error; if it ever does, an
	// invariant has been broken, so panic.
	if _, err := h.d.Write(b[:]); err != nil {
		panic("xxhash write of uint64 failed: " + err.Error())
	}
	return h
}
// Bool mixes val into the hash as a uint64: 1 for true, 0 for false.
func (h Hash) Bool(val bool) Hash { // nolint:revive // This is a hashing function.
	var raw uint64
	if val {
		raw = 1
	}
	return h.Uint64(raw)
}
// Float64 mixes the IEEE 754 bit pattern of val into the hash.
func (h Hash) Float64(val float64) Hash {
	bits := math.Float64bits(val)
	return h.Uint64(bits)
}
// Int64 mixes the two's-complement bit pattern of val into the hash.
func (h Hash) Int64(val int64) Hash {
	bits := uint64(val) // nolint:gosec // Overflow doesn't matter since we are hashing.
	return h.Uint64(bits)
}
// String mixes the bytes of val into the hash and returns the updated hash.
func (h Hash) String(val string) Hash {
	// xxhash's WriteString never returns a non-nil error; if it ever does,
	// an invariant has been broken, so panic.
	if _, err := h.d.WriteString(val); err != nil {
		panic("xxhash write of string failed: " + err.Error())
	}
	return h
}
// Sum64 returns the xxHash64 checksum of all values mixed in so far.
func (h Hash) Sum64() uint64 {
	return h.d.Sum64()
}

View File

@ -25,8 +25,8 @@ type oneIterator struct {
attr KeyValue attr KeyValue
} }
// Next moves the iterator to the next position. Returns false if there are no // Next moves the iterator to the next position.
// more attributes. // Next reports whether there are more attributes.
func (i *Iterator) Next() bool { func (i *Iterator) Next() bool {
i.idx++ i.idx++
return i.idx < i.Len() return i.idx < i.Len()
@ -106,7 +106,8 @@ func (oi *oneIterator) advance() {
} }
} }
// Next returns true if there is another attribute available. // Next moves the iterator to the next position.
// Next reports whether there is another attribute available.
func (m *MergeIterator) Next() bool { func (m *MergeIterator) Next() bool {
if m.one.done && m.two.done { if m.one.done && m.two.done {
return false return false

View File

@ -117,7 +117,7 @@ func (k Key) StringSlice(v []string) KeyValue {
} }
} }
// Defined returns true for non-empty keys. // Defined reports whether the key is not empty.
func (k Key) Defined() bool { func (k Key) Defined() bool {
return len(k) != 0 return len(k) != 0
} }

View File

@ -13,7 +13,7 @@ type KeyValue struct {
Value Value Value Value
} }
// Valid returns if kv is a valid OpenTelemetry attribute. // Valid reports whether kv is a valid OpenTelemetry attribute.
func (kv KeyValue) Valid() bool { func (kv KeyValue) Valid() bool {
return kv.Key.Defined() && kv.Value.Type() != INVALID return kv.Key.Defined() && kv.Value.Type() != INVALID
} }

View File

@ -0,0 +1,37 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package attribute // import "go.opentelemetry.io/otel/attribute"
import (
"math"
)
// boolToRaw encodes b into the raw uint64 representation used by Value:
// 1 for true, 0 for false.
func boolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag.
	if !b {
		return 0
	}
	return 1
}
// rawToBool decodes the raw uint64 representation of a bool: any non-zero
// value is true.
func rawToBool(r uint64) bool {
	if r == 0 {
		return false
	}
	return true
}
// int64ToRaw reinterprets i as its two's-complement uint64 bit pattern.
// It is the inverse of rawToInt64; overflow is not checked.
func int64ToRaw(i int64) uint64 {
	raw := uint64(i) // nolint: gosec
	return raw
}
// rawToInt64 reinterprets the uint64 bit pattern r as an int64.
// It is the inverse of int64ToRaw; overflow is not checked.
func rawToInt64(r uint64) int64 {
	v := int64(r) // nolint: gosec
	return v
}
// float64ToRaw encodes f as its IEEE 754 binary64 bit pattern.
func float64ToRaw(f float64) uint64 {
	bits := math.Float64bits(f)
	return bits
}
// rawToFloat64 decodes the IEEE 754 binary64 bit pattern r into a float64.
// It is the inverse of float64ToRaw.
func rawToFloat64(r uint64) float64 {
	f := math.Float64frombits(r)
	return f
}

View File

@ -9,6 +9,8 @@ import (
"reflect" "reflect"
"slices" "slices"
"sort" "sort"
"go.opentelemetry.io/otel/attribute/internal/xxhash"
) )
type ( type (
@ -23,19 +25,19 @@ type (
// the Equals method to ensure stable equivalence checking. // the Equals method to ensure stable equivalence checking.
// //
// Users should also use the Distinct returned from Equivalent as a map key // Users should also use the Distinct returned from Equivalent as a map key
// instead of a Set directly. In addition to that type providing guarantees // instead of a Set directly. Set has relatively poor performance when used
// on stable equivalence, it may also provide performance improvements. // as a map key compared to Distinct.
Set struct { Set struct {
equivalent Distinct hash uint64
data any
} }
// Distinct is a unique identifier of a Set. // Distinct is an identifier of a Set which is very likely to be unique.
// //
// Distinct is designed to be ensures equivalence stability: comparisons // Distinct should be used as a map key instead of a Set for to provide better
// will return the save value across versions. For this reason, Distinct // performance for map operations.
// should always be used as a map key instead of a Set.
Distinct struct { Distinct struct {
iface interface{} hash uint64
} }
// Sortable implements sort.Interface, used for sorting KeyValue. // Sortable implements sort.Interface, used for sorting KeyValue.
@ -46,15 +48,34 @@ type (
Sortable []KeyValue Sortable []KeyValue
) )
// Compile time check these types remain comparable.
var (
_ = isComparable(Set{})
_ = isComparable(Distinct{})
)
func isComparable[T comparable](t T) T { return t }
var ( var (
// keyValueType is used in computeDistinctReflect. // keyValueType is used in computeDistinctReflect.
keyValueType = reflect.TypeOf(KeyValue{}) keyValueType = reflect.TypeFor[KeyValue]()
// emptySet is returned for empty attribute sets. // emptyHash is the hash of an empty set.
emptySet = &Set{ emptyHash = xxhash.New().Sum64()
equivalent: Distinct{
iface: [0]KeyValue{}, // userDefinedEmptySet is an empty set. It was mistakenly exposed to users
}, // as something they can assign to, so it must remain addressable and
// mutable.
//
// This is kept for backwards compatibility, but should not be used in new code.
userDefinedEmptySet = &Set{
hash: emptyHash,
data: [0]KeyValue{},
}
emptySet = Set{
hash: emptyHash,
data: [0]KeyValue{},
} }
) )
@ -62,33 +83,35 @@ var (
// //
// This is a convenience provided for optimized calling utility. // This is a convenience provided for optimized calling utility.
func EmptySet() *Set { func EmptySet() *Set {
return emptySet // Continue to return the pointer to the user-defined empty set for
// backwards-compatibility.
//
// New code should not use this, instead use emptySet.
return userDefinedEmptySet
} }
// Valid reports whether this value refers to a valid Set.
func (d Distinct) Valid() bool { return d.hash != 0 }
// reflectValue abbreviates reflect.ValueOf(d). // reflectValue abbreviates reflect.ValueOf(d).
func (d Distinct) reflectValue() reflect.Value { func (l Set) reflectValue() reflect.Value {
return reflect.ValueOf(d.iface) return reflect.ValueOf(l.data)
}
// Valid returns true if this value refers to a valid Set.
func (d Distinct) Valid() bool {
return d.iface != nil
} }
// Len returns the number of attributes in this set. // Len returns the number of attributes in this set.
func (l *Set) Len() int { func (l *Set) Len() int {
if l == nil || !l.equivalent.Valid() { if l == nil || l.hash == 0 {
return 0 return 0
} }
return l.equivalent.reflectValue().Len() return l.reflectValue().Len()
} }
// Get returns the KeyValue at ordered position idx in this set. // Get returns the KeyValue at ordered position idx in this set.
func (l *Set) Get(idx int) (KeyValue, bool) { func (l *Set) Get(idx int) (KeyValue, bool) {
if l == nil || !l.equivalent.Valid() { if l == nil || l.hash == 0 {
return KeyValue{}, false return KeyValue{}, false
} }
value := l.equivalent.reflectValue() value := l.reflectValue()
if idx >= 0 && idx < value.Len() { if idx >= 0 && idx < value.Len() {
// Note: The Go compiler successfully avoids an allocation for // Note: The Go compiler successfully avoids an allocation for
@ -101,10 +124,10 @@ func (l *Set) Get(idx int) (KeyValue, bool) {
// Value returns the value of a specified key in this set. // Value returns the value of a specified key in this set.
func (l *Set) Value(k Key) (Value, bool) { func (l *Set) Value(k Key) (Value, bool) {
if l == nil || !l.equivalent.Valid() { if l == nil || l.hash == 0 {
return Value{}, false return Value{}, false
} }
rValue := l.equivalent.reflectValue() rValue := l.reflectValue()
vlen := rValue.Len() vlen := rValue.Len()
idx := sort.Search(vlen, func(idx int) bool { idx := sort.Search(vlen, func(idx int) bool {
@ -120,7 +143,7 @@ func (l *Set) Value(k Key) (Value, bool) {
return Value{}, false return Value{}, false
} }
// HasValue tests whether a key is defined in this set. // HasValue reports whether a key is defined in this set.
func (l *Set) HasValue(k Key) bool { func (l *Set) HasValue(k Key) bool {
if l == nil { if l == nil {
return false return false
@ -144,20 +167,29 @@ func (l *Set) ToSlice() []KeyValue {
return iter.ToSlice() return iter.ToSlice()
} }
// Equivalent returns a value that may be used as a map key. The Distinct type // Equivalent returns a value that may be used as a map key. Equal Distinct
// guarantees that the result will equal the equivalent. Distinct value of any // values are very likely to be equivalent attribute Sets. Distinct value of any
// attribute set with the same elements as this, where sets are made unique by // attribute set with the same elements as this, where sets are made unique by
// choosing the last value in the input for any given key. // choosing the last value in the input for any given key.
func (l *Set) Equivalent() Distinct { func (l *Set) Equivalent() Distinct {
if l == nil || !l.equivalent.Valid() { if l == nil || l.hash == 0 {
return emptySet.equivalent return Distinct{hash: emptySet.hash}
} }
return l.equivalent return Distinct{hash: l.hash}
} }
// Equals returns true if the argument set is equivalent to this set. // Equals reports whether the argument set is equivalent to this set.
func (l *Set) Equals(o *Set) bool { func (l *Set) Equals(o *Set) bool {
return l.Equivalent() == o.Equivalent() if l.Equivalent() != o.Equivalent() {
return false
}
if l == nil || l.hash == 0 {
l = &emptySet
}
if o == nil || o.hash == 0 {
o = &emptySet
}
return l.data == o.data
} }
// Encoded returns the encoded form of this set, according to encoder. // Encoded returns the encoded form of this set, according to encoder.
@ -169,12 +201,6 @@ func (l *Set) Encoded(encoder Encoder) string {
return encoder.Encode(l.Iter()) return encoder.Encode(l.Iter())
} }
func empty() Set {
return Set{
equivalent: emptySet.equivalent,
}
}
// NewSet returns a new Set. See the documentation for // NewSet returns a new Set. See the documentation for
// NewSetWithSortableFiltered for more details. // NewSetWithSortableFiltered for more details.
// //
@ -204,7 +230,7 @@ func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set {
func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
// Check for empty set. // Check for empty set.
if len(kvs) == 0 { if len(kvs) == 0 {
return empty(), nil return emptySet, nil
} }
// Stable sort so the following de-duplication can implement // Stable sort so the following de-duplication can implement
@ -233,10 +259,10 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
if filter != nil { if filter != nil {
if div := filteredToFront(kvs, filter); div != 0 { if div := filteredToFront(kvs, filter); div != 0 {
return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div] return newSet(kvs[div:]), kvs[:div]
} }
} }
return Set{equivalent: computeDistinct(kvs)}, nil return newSet(kvs), nil
} }
// NewSetWithSortableFiltered returns a new Set. // NewSetWithSortableFiltered returns a new Set.
@ -316,7 +342,7 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) {
if first == 0 { if first == 0 {
// It is safe to assume len(slice) >= 1 given we found at least one // It is safe to assume len(slice) >= 1 given we found at least one
// attribute above that needs to be filtered out. // attribute above that needs to be filtered out.
return Set{equivalent: computeDistinct(slice[1:])}, slice[:1] return newSet(slice[1:]), slice[:1]
} }
// Move the filtered slice[first] to the front (preserving order). // Move the filtered slice[first] to the front (preserving order).
@ -326,25 +352,24 @@ func (l *Set) Filter(re Filter) (Set, []KeyValue) {
// Do not re-evaluate re(slice[first+1:]). // Do not re-evaluate re(slice[first+1:]).
div := filteredToFront(slice[1:first+1], re) + 1 div := filteredToFront(slice[1:first+1], re) + 1
return Set{equivalent: computeDistinct(slice[div:])}, slice[:div] return newSet(slice[div:]), slice[:div]
} }
// computeDistinct returns a Distinct using either the fixed- or // newSet returns a new set based on the sorted and uniqued kvs.
// reflect-oriented code path, depending on the size of the input. The input func newSet(kvs []KeyValue) Set {
// slice is assumed to already be sorted and de-duplicated. s := Set{
func computeDistinct(kvs []KeyValue) Distinct { hash: hashKVs(kvs),
iface := computeDistinctFixed(kvs) data: computeDataFixed(kvs),
if iface == nil {
iface = computeDistinctReflect(kvs)
} }
return Distinct{ if s.data == nil {
iface: iface, s.data = computeDataReflect(kvs)
} }
return s
} }
// computeDistinctFixed computes a Distinct for small slices. It returns nil // computeDataFixed computes a Set data for small slices. It returns nil if the
// if the input is too large for this code path. // input is too large for this code path.
func computeDistinctFixed(kvs []KeyValue) interface{} { func computeDataFixed(kvs []KeyValue) any {
switch len(kvs) { switch len(kvs) {
case 1: case 1:
return [1]KeyValue(kvs) return [1]KeyValue(kvs)
@ -371,9 +396,9 @@ func computeDistinctFixed(kvs []KeyValue) interface{} {
} }
} }
// computeDistinctReflect computes a Distinct using reflection, works for any // computeDataReflect computes a Set data using reflection, works for any size
// size input. // input.
func computeDistinctReflect(kvs []KeyValue) interface{} { func computeDataReflect(kvs []KeyValue) any {
at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem() at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
for i, keyValue := range kvs { for i, keyValue := range kvs {
*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue *(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
@ -383,11 +408,11 @@ func computeDistinctReflect(kvs []KeyValue) interface{} {
// MarshalJSON returns the JSON encoding of the Set. // MarshalJSON returns the JSON encoding of the Set.
func (l *Set) MarshalJSON() ([]byte, error) { func (l *Set) MarshalJSON() ([]byte, error) {
return json.Marshal(l.equivalent.iface) return json.Marshal(l.data)
} }
// MarshalLog is the marshaling function used by the logging system to represent this Set. // MarshalLog is the marshaling function used by the logging system to represent this Set.
func (l Set) MarshalLog() interface{} { func (l Set) MarshalLog() any {
kvs := make(map[string]string) kvs := make(map[string]string)
for _, kv := range l.ToSlice() { for _, kv := range l.ToSlice() {
kvs[string(kv.Key)] = kv.Value.Emit() kvs[string(kv.Key)] = kv.Value.Emit()

View File

@ -24,8 +24,9 @@ const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICE
var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71} var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}
func (i Type) String() string { func (i Type) String() string {
if i < 0 || i >= Type(len(_Type_index)-1) { idx := int(i) - 0
if i < 0 || idx >= len(_Type_index)-1 {
return "Type(" + strconv.FormatInt(int64(i), 10) + ")" return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
} }
return _Type_name[_Type_index[i]:_Type_index[i+1]] return _Type_name[_Type_index[idx]:_Type_index[idx+1]]
} }

View File

@ -9,8 +9,7 @@ import (
"reflect" "reflect"
"strconv" "strconv"
"go.opentelemetry.io/otel/internal" attribute "go.opentelemetry.io/otel/attribute/internal"
"go.opentelemetry.io/otel/internal/attribute"
) )
//go:generate stringer -type=Type //go:generate stringer -type=Type
@ -23,7 +22,7 @@ type Value struct {
vtype Type vtype Type
numeric uint64 numeric uint64
stringly string stringly string
slice interface{} slice any
} }
const ( const (
@ -51,7 +50,7 @@ const (
func BoolValue(v bool) Value { func BoolValue(v bool) Value {
return Value{ return Value{
vtype: BOOL, vtype: BOOL,
numeric: internal.BoolToRaw(v), numeric: boolToRaw(v),
} }
} }
@ -67,8 +66,7 @@ func IntValue(v int) Value {
// IntSliceValue creates an INTSLICE Value. // IntSliceValue creates an INTSLICE Value.
func IntSliceValue(v []int) Value { func IntSliceValue(v []int) Value {
var int64Val int64 cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeFor[int64]()))
cp := reflect.New(reflect.ArrayOf(len(v), reflect.TypeOf(int64Val)))
for i, val := range v { for i, val := range v {
cp.Elem().Index(i).SetInt(int64(val)) cp.Elem().Index(i).SetInt(int64(val))
} }
@ -82,7 +80,7 @@ func IntSliceValue(v []int) Value {
func Int64Value(v int64) Value { func Int64Value(v int64) Value {
return Value{ return Value{
vtype: INT64, vtype: INT64,
numeric: internal.Int64ToRaw(v), numeric: int64ToRaw(v),
} }
} }
@ -95,7 +93,7 @@ func Int64SliceValue(v []int64) Value {
func Float64Value(v float64) Value { func Float64Value(v float64) Value {
return Value{ return Value{
vtype: FLOAT64, vtype: FLOAT64,
numeric: internal.Float64ToRaw(v), numeric: float64ToRaw(v),
} }
} }
@ -125,7 +123,7 @@ func (v Value) Type() Type {
// AsBool returns the bool value. Make sure that the Value's type is // AsBool returns the bool value. Make sure that the Value's type is
// BOOL. // BOOL.
func (v Value) AsBool() bool { func (v Value) AsBool() bool {
return internal.RawToBool(v.numeric) return rawToBool(v.numeric)
} }
// AsBoolSlice returns the []bool value. Make sure that the Value's type is // AsBoolSlice returns the []bool value. Make sure that the Value's type is
@ -144,7 +142,7 @@ func (v Value) asBoolSlice() []bool {
// AsInt64 returns the int64 value. Make sure that the Value's type is // AsInt64 returns the int64 value. Make sure that the Value's type is
// INT64. // INT64.
func (v Value) AsInt64() int64 { func (v Value) AsInt64() int64 {
return internal.RawToInt64(v.numeric) return rawToInt64(v.numeric)
} }
// AsInt64Slice returns the []int64 value. Make sure that the Value's type is // AsInt64Slice returns the []int64 value. Make sure that the Value's type is
@ -163,7 +161,7 @@ func (v Value) asInt64Slice() []int64 {
// AsFloat64 returns the float64 value. Make sure that the Value's // AsFloat64 returns the float64 value. Make sure that the Value's
// type is FLOAT64. // type is FLOAT64.
func (v Value) AsFloat64() float64 { func (v Value) AsFloat64() float64 {
return internal.RawToFloat64(v.numeric) return rawToFloat64(v.numeric)
} }
// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is // AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
@ -200,8 +198,8 @@ func (v Value) asStringSlice() []string {
type unknownValueType struct{} type unknownValueType struct{}
// AsInterface returns Value's data as interface{}. // AsInterface returns Value's data as any.
func (v Value) AsInterface() interface{} { func (v Value) AsInterface() any {
switch v.Type() { switch v.Type() {
case BOOL: case BOOL:
return v.AsBool() return v.AsBool()
@ -263,7 +261,7 @@ func (v Value) Emit() string {
func (v Value) MarshalJSON() ([]byte, error) { func (v Value) MarshalJSON() ([]byte, error) {
var jsonVal struct { var jsonVal struct {
Type string Type string
Value interface{} Value any
} }
jsonVal.Type = v.Type().String() jsonVal.Type = v.Type().String()
jsonVal.Value = v.AsInterface() jsonVal.Value = v.AsInterface()

View File

@ -317,7 +317,7 @@ func parseMember(member string) (Member, error) {
keyValue, properties, found := strings.Cut(member, propertyDelimiter) keyValue, properties, found := strings.Cut(member, propertyDelimiter)
if found { if found {
// Parse the member properties. // Parse the member properties.
for _, pStr := range strings.Split(properties, propertyDelimiter) { for pStr := range strings.SplitSeq(properties, propertyDelimiter) {
p, err := parseProperty(pStr) p, err := parseProperty(pStr)
if err != nil { if err != nil {
return newInvalidMember(), err return newInvalidMember(), err
@ -480,7 +480,7 @@ func Parse(bStr string) (Baggage, error) {
} }
b := make(baggage.List) b := make(baggage.List)
for _, memberStr := range strings.Split(bStr, listDelimiter) { for memberStr := range strings.SplitSeq(bStr, listDelimiter) {
m, err := parseMember(memberStr) m, err := parseMember(memberStr)
if err != nil { if err != nil {
return Baggage{}, err return Baggage{}, err
@ -648,7 +648,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
// If we couldn't find any valid key character, // If we couldn't find any valid key character,
// it means the key is either empty or invalid. // it means the key is either empty or invalid.
if keyStart == keyEnd { if keyStart == keyEnd {
return return p, ok
} }
// Skip spaces after the key: " key< >= value ". // Skip spaces after the key: " key< >= value ".
@ -658,13 +658,13 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
// A key can have no value, like: " key ". // A key can have no value, like: " key ".
ok = true ok = true
p.key = s[keyStart:keyEnd] p.key = s[keyStart:keyEnd]
return return p, ok
} }
// If we have not reached the end and we can't find the '=' delimiter, // If we have not reached the end and we can't find the '=' delimiter,
// it means the property is invalid. // it means the property is invalid.
if s[index] != keyValueDelimiter[0] { if s[index] != keyValueDelimiter[0] {
return return p, ok
} }
// Attempting to parse the value. // Attempting to parse the value.
@ -690,14 +690,14 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
// we have not reached the end, it means the property is // we have not reached the end, it means the property is
// invalid, something like: " key = value value1". // invalid, something like: " key = value value1".
if index != len(s) { if index != len(s) {
return return p, ok
} }
// Decode a percent-encoded value. // Decode a percent-encoded value.
rawVal := s[valueStart:valueEnd] rawVal := s[valueStart:valueEnd]
unescapeVal, err := url.PathUnescape(rawVal) unescapeVal, err := url.PathUnescape(rawVal)
if err != nil { if err != nil {
return return p, ok
} }
value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal) value := replaceInvalidUTF8Sequences(len(rawVal), unescapeVal)
@ -706,7 +706,7 @@ func parsePropertyInternal(s string) (p Property, ok bool) {
p.hasValue = true p.hasValue = true
p.value = value p.value = value
return return p, ok
} }
func skipSpace(s string, offset int) int { func skipSpace(s string, offset int) int {
@ -812,7 +812,7 @@ var safeKeyCharset = [utf8.RuneSelf]bool{
// validateBaggageName checks if the string is a valid OpenTelemetry Baggage name. // validateBaggageName checks if the string is a valid OpenTelemetry Baggage name.
// Baggage name is a valid, non-empty UTF-8 string. // Baggage name is a valid, non-empty UTF-8 string.
func validateBaggageName(s string) bool { func validateBaggageName(s string) bool {
if len(s) == 0 { if s == "" {
return false return false
} }
@ -828,7 +828,7 @@ func validateBaggageValue(s string) bool {
// validateKey checks if the string is a valid W3C Baggage key. // validateKey checks if the string is a valid W3C Baggage key.
func validateKey(s string) bool { func validateKey(s string) bool {
if len(s) == 0 { if s == "" {
return false return false
} }

View File

@ -67,7 +67,7 @@ func (c *Code) UnmarshalJSON(b []byte) error {
return errors.New("nil receiver passed to UnmarshalJSON") return errors.New("nil receiver passed to UnmarshalJSON")
} }
var x interface{} var x any
if err := json.Unmarshal(b, &x); err != nil { if err := json.Unmarshal(b, &x); err != nil {
return err return err
} }
@ -102,5 +102,5 @@ func (c *Code) MarshalJSON() ([]byte, error) {
if !ok { if !ok {
return nil, fmt.Errorf("invalid code: %d", *c) return nil, fmt.Errorf("invalid code: %d", *c)
} }
return []byte(fmt.Sprintf("%q", str)), nil return fmt.Appendf(nil, "%q", str), nil
} }

View File

@ -1,3 +1,4 @@
# This is a renovate-friendly source of Docker images. # This is a renovate-friendly source of Docker images.
FROM python:3.13.2-slim-bullseye@sha256:31b581c8218e1f3c58672481b3b7dba8e898852866b408c6a984c22832523935 AS python FROM python:3.13.6-slim-bullseye@sha256:e98b521460ee75bca92175c16247bdf7275637a8faaeb2bcfa19d879ae5c4b9a AS python
FROM otel/weaver:v0.13.2@sha256:ae7346b992e477f629ea327e0979e8a416a97f7956ab1f7e95ac1f44edf1a893 AS weaver FROM otel/weaver:v0.20.0@sha256:fa4f1c6954ecea78ab1a4e865bd6f5b4aaba80c1896f9f4a11e2c361d04e197e AS weaver
FROM avtodev/markdown-lint:v1@sha256:6aeedc2f49138ce7a1cd0adffc1b1c0321b841dc2102408967d9301c031949ee AS markdown

View File

@ -1,30 +0,0 @@
#!/usr/bin/env bash
# Copyright The OpenTelemetry Authors
# SPDX-License-Identifier: Apache-2.0
set -euo pipefail
top_dir='.'
if [[ $# -gt 0 ]]; then
top_dir="${1}"
fi
p=$(pwd)
mod_dirs=()
# Note `mapfile` does not exist in older bash versions:
# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash
while IFS= read -r line; do
mod_dirs+=("$line")
done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort)
for mod_dir in "${mod_dirs[@]}"; do
cd "${mod_dir}"
while IFS= read -r line; do
echo ".${line#${p}}"
done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|')
cd "${p}"
done

View File

@ -1,18 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/otel/internal"
//go:generate gotmpl --body=./shared/matchers/expectation.go.tmpl "--data={}" --out=matchers/expectation.go
//go:generate gotmpl --body=./shared/matchers/expecter.go.tmpl "--data={}" --out=matchers/expecter.go
//go:generate gotmpl --body=./shared/matchers/temporal_matcher.go.tmpl "--data={}" --out=matchers/temporal_matcher.go
//go:generate gotmpl --body=./shared/internaltest/alignment.go.tmpl "--data={}" --out=internaltest/alignment.go
//go:generate gotmpl --body=./shared/internaltest/env.go.tmpl "--data={}" --out=internaltest/env.go
//go:generate gotmpl --body=./shared/internaltest/env_test.go.tmpl "--data={}" --out=internaltest/env_test.go
//go:generate gotmpl --body=./shared/internaltest/errors.go.tmpl "--data={}" --out=internaltest/errors.go
//go:generate gotmpl --body=./shared/internaltest/harness.go.tmpl "--data={\"matchersImportPath\": \"go.opentelemetry.io/otel/internal/matchers\"}" --out=internaltest/harness.go
//go:generate gotmpl --body=./shared/internaltest/text_map_carrier.go.tmpl "--data={}" --out=internaltest/text_map_carrier.go
//go:generate gotmpl --body=./shared/internaltest/text_map_carrier_test.go.tmpl "--data={}" --out=internaltest/text_map_carrier_test.go
//go:generate gotmpl --body=./shared/internaltest/text_map_propagator.go.tmpl "--data={}" --out=internaltest/text_map_propagator.go
//go:generate gotmpl --body=./shared/internaltest/text_map_propagator_test.go.tmpl "--data={}" --out=internaltest/text_map_propagator_test.go

View File

@ -1,6 +1,7 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// Package global provides the OpenTelemetry global API.
package global // import "go.opentelemetry.io/otel/internal/global" package global // import "go.opentelemetry.io/otel/internal/global"
import ( import (

View File

@ -229,6 +229,13 @@ func (i *sfCounter) Add(ctx context.Context, incr float64, opts ...metric.AddOpt
} }
} }
func (i *sfCounter) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64Counter).Enabled(ctx)
}
return false
}
type sfUpDownCounter struct { type sfUpDownCounter struct {
embedded.Float64UpDownCounter embedded.Float64UpDownCounter
@ -255,6 +262,13 @@ func (i *sfUpDownCounter) Add(ctx context.Context, incr float64, opts ...metric.
} }
} }
func (i *sfUpDownCounter) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64UpDownCounter).Enabled(ctx)
}
return false
}
type sfHistogram struct { type sfHistogram struct {
embedded.Float64Histogram embedded.Float64Histogram
@ -281,6 +295,13 @@ func (i *sfHistogram) Record(ctx context.Context, x float64, opts ...metric.Reco
} }
} }
func (i *sfHistogram) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64Histogram).Enabled(ctx)
}
return false
}
type sfGauge struct { type sfGauge struct {
embedded.Float64Gauge embedded.Float64Gauge
@ -307,6 +328,13 @@ func (i *sfGauge) Record(ctx context.Context, x float64, opts ...metric.RecordOp
} }
} }
func (i *sfGauge) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Float64Gauge).Enabled(ctx)
}
return false
}
type siCounter struct { type siCounter struct {
embedded.Int64Counter embedded.Int64Counter
@ -333,6 +361,13 @@ func (i *siCounter) Add(ctx context.Context, x int64, opts ...metric.AddOption)
} }
} }
func (i *siCounter) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64Counter).Enabled(ctx)
}
return false
}
type siUpDownCounter struct { type siUpDownCounter struct {
embedded.Int64UpDownCounter embedded.Int64UpDownCounter
@ -359,6 +394,13 @@ func (i *siUpDownCounter) Add(ctx context.Context, x int64, opts ...metric.AddOp
} }
} }
func (i *siUpDownCounter) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64UpDownCounter).Enabled(ctx)
}
return false
}
type siHistogram struct { type siHistogram struct {
embedded.Int64Histogram embedded.Int64Histogram
@ -385,6 +427,13 @@ func (i *siHistogram) Record(ctx context.Context, x int64, opts ...metric.Record
} }
} }
func (i *siHistogram) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64Histogram).Enabled(ctx)
}
return false
}
type siGauge struct { type siGauge struct {
embedded.Int64Gauge embedded.Int64Gauge
@ -410,3 +459,10 @@ func (i *siGauge) Record(ctx context.Context, x int64, opts ...metric.RecordOpti
ctr.(metric.Int64Gauge).Record(ctx, x, opts...) ctr.(metric.Int64Gauge).Record(ctx, x, opts...)
} }
} }
func (i *siGauge) Enabled(ctx context.Context) bool {
if ctr := i.delegate.Load(); ctr != nil {
return ctr.(metric.Int64Gauge).Enabled(ctx)
}
return false
}

View File

@ -41,22 +41,22 @@ func GetLogger() logr.Logger {
// Info prints messages about the general state of the API or SDK. // Info prints messages about the general state of the API or SDK.
// This should usually be less than 5 messages a minute. // This should usually be less than 5 messages a minute.
func Info(msg string, keysAndValues ...interface{}) { func Info(msg string, keysAndValues ...any) {
GetLogger().V(4).Info(msg, keysAndValues...) GetLogger().V(4).Info(msg, keysAndValues...)
} }
// Error prints messages about exceptional states of the API or SDK. // Error prints messages about exceptional states of the API or SDK.
func Error(err error, msg string, keysAndValues ...interface{}) { func Error(err error, msg string, keysAndValues ...any) {
GetLogger().Error(err, msg, keysAndValues...) GetLogger().Error(err, msg, keysAndValues...)
} }
// Debug prints messages about all internal changes in the API or SDK. // Debug prints messages about all internal changes in the API or SDK.
func Debug(msg string, keysAndValues ...interface{}) { func Debug(msg string, keysAndValues ...any) {
GetLogger().V(8).Info(msg, keysAndValues...) GetLogger().V(8).Info(msg, keysAndValues...)
} }
// Warn prints messages about warnings in the API or SDK. // Warn prints messages about warnings in the API or SDK.
// Not an error but is likely more important than an informational event. // Not an error but is likely more important than an informational event.
func Warn(msg string, keysAndValues ...interface{}) { func Warn(msg string, keysAndValues ...any) {
GetLogger().V(1).Info(msg, keysAndValues...) GetLogger().V(1).Info(msg, keysAndValues...)
} }

View File

@ -105,7 +105,7 @@ type delegatedInstrument interface {
setDelegate(metric.Meter) setDelegate(metric.Meter)
} }
// instID are the identifying properties of a instrument. // instID are the identifying properties of an instrument.
type instID struct { type instID struct {
// name is the name of the stream. // name is the name of the stream.
name string name string
@ -157,7 +157,7 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption)
cfg := metric.NewInt64CounterConfig(options...) cfg := metric.NewInt64CounterConfig(options...)
id := instID{ id := instID{
name: name, name: name,
kind: reflect.TypeOf((*siCounter)(nil)), kind: reflect.TypeFor[*siCounter](),
description: cfg.Description(), description: cfg.Description(),
unit: cfg.Unit(), unit: cfg.Unit(),
} }
@ -169,7 +169,10 @@ func (m *meter) Int64Counter(name string, options ...metric.Int64CounterOption)
return i, nil return i, nil
} }
func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) { func (m *meter) Int64UpDownCounter(
name string,
options ...metric.Int64UpDownCounterOption,
) (metric.Int64UpDownCounter, error) {
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
@ -180,7 +183,7 @@ func (m *meter) Int64UpDownCounter(name string, options ...metric.Int64UpDownCou
cfg := metric.NewInt64UpDownCounterConfig(options...) cfg := metric.NewInt64UpDownCounterConfig(options...)
id := instID{ id := instID{
name: name, name: name,
kind: reflect.TypeOf((*siUpDownCounter)(nil)), kind: reflect.TypeFor[*siUpDownCounter](),
description: cfg.Description(), description: cfg.Description(),
unit: cfg.Unit(), unit: cfg.Unit(),
} }
@ -203,7 +206,7 @@ func (m *meter) Int64Histogram(name string, options ...metric.Int64HistogramOpti
cfg := metric.NewInt64HistogramConfig(options...) cfg := metric.NewInt64HistogramConfig(options...)
id := instID{ id := instID{
name: name, name: name,
kind: reflect.TypeOf((*siHistogram)(nil)), kind: reflect.TypeFor[*siHistogram](),
description: cfg.Description(), description: cfg.Description(),
unit: cfg.Unit(), unit: cfg.Unit(),
} }
@ -226,7 +229,7 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (met
cfg := metric.NewInt64GaugeConfig(options...) cfg := metric.NewInt64GaugeConfig(options...)
id := instID{ id := instID{
name: name, name: name,
kind: reflect.TypeOf((*siGauge)(nil)), kind: reflect.TypeFor[*siGauge](),
description: cfg.Description(), description: cfg.Description(),
unit: cfg.Unit(), unit: cfg.Unit(),
} }
@ -238,7 +241,10 @@ func (m *meter) Int64Gauge(name string, options ...metric.Int64GaugeOption) (met
return i, nil return i, nil
} }
func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64ObservableCounterOption) (metric.Int64ObservableCounter, error) { func (m *meter) Int64ObservableCounter(
name string,
options ...metric.Int64ObservableCounterOption,
) (metric.Int64ObservableCounter, error) {
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
@ -249,7 +255,7 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser
cfg := metric.NewInt64ObservableCounterConfig(options...) cfg := metric.NewInt64ObservableCounterConfig(options...)
id := instID{ id := instID{
name: name, name: name,
kind: reflect.TypeOf((*aiCounter)(nil)), kind: reflect.TypeFor[*aiCounter](),
description: cfg.Description(), description: cfg.Description(),
unit: cfg.Unit(), unit: cfg.Unit(),
} }
@ -261,7 +267,10 @@ func (m *meter) Int64ObservableCounter(name string, options ...metric.Int64Obser
return i, nil return i, nil
} }
func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int64ObservableUpDownCounterOption) (metric.Int64ObservableUpDownCounter, error) { func (m *meter) Int64ObservableUpDownCounter(
name string,
options ...metric.Int64ObservableUpDownCounterOption,
) (metric.Int64ObservableUpDownCounter, error) {
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
@ -272,7 +281,7 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6
cfg := metric.NewInt64ObservableUpDownCounterConfig(options...) cfg := metric.NewInt64ObservableUpDownCounterConfig(options...)
id := instID{ id := instID{
name: name, name: name,
kind: reflect.TypeOf((*aiUpDownCounter)(nil)), kind: reflect.TypeFor[*aiUpDownCounter](),
description: cfg.Description(), description: cfg.Description(),
unit: cfg.Unit(), unit: cfg.Unit(),
} }
@ -284,7 +293,10 @@ func (m *meter) Int64ObservableUpDownCounter(name string, options ...metric.Int6
return i, nil return i, nil
} }
func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) { func (m *meter) Int64ObservableGauge(
name string,
options ...metric.Int64ObservableGaugeOption,
) (metric.Int64ObservableGauge, error) {
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
@ -295,7 +307,7 @@ func (m *meter) Int64ObservableGauge(name string, options ...metric.Int64Observa
cfg := metric.NewInt64ObservableGaugeConfig(options...) cfg := metric.NewInt64ObservableGaugeConfig(options...)
id := instID{ id := instID{
name: name, name: name,
kind: reflect.TypeOf((*aiGauge)(nil)), kind: reflect.TypeFor[*aiGauge](),
description: cfg.Description(), description: cfg.Description(),
unit: cfg.Unit(), unit: cfg.Unit(),
} }
@ -318,7 +330,7 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti
cfg := metric.NewFloat64CounterConfig(options...) cfg := metric.NewFloat64CounterConfig(options...)
id := instID{ id := instID{
name: name, name: name,
kind: reflect.TypeOf((*sfCounter)(nil)), kind: reflect.TypeFor[*sfCounter](),
description: cfg.Description(), description: cfg.Description(),
unit: cfg.Unit(), unit: cfg.Unit(),
} }
@ -330,7 +342,10 @@ func (m *meter) Float64Counter(name string, options ...metric.Float64CounterOpti
return i, nil return i, nil
} }
func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) { func (m *meter) Float64UpDownCounter(
name string,
options ...metric.Float64UpDownCounterOption,
) (metric.Float64UpDownCounter, error) {
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
@ -341,7 +356,7 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDow
cfg := metric.NewFloat64UpDownCounterConfig(options...) cfg := metric.NewFloat64UpDownCounterConfig(options...)
id := instID{ id := instID{
name: name, name: name,
kind: reflect.TypeOf((*sfUpDownCounter)(nil)), kind: reflect.TypeFor[*sfUpDownCounter](),
description: cfg.Description(), description: cfg.Description(),
unit: cfg.Unit(), unit: cfg.Unit(),
} }
@ -353,7 +368,10 @@ func (m *meter) Float64UpDownCounter(name string, options ...metric.Float64UpDow
return i, nil return i, nil
} }
func (m *meter) Float64Histogram(name string, options ...metric.Float64HistogramOption) (metric.Float64Histogram, error) { func (m *meter) Float64Histogram(
name string,
options ...metric.Float64HistogramOption,
) (metric.Float64Histogram, error) {
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
@ -364,7 +382,7 @@ func (m *meter) Float64Histogram(name string, options ...metric.Float64Histogram
cfg := metric.NewFloat64HistogramConfig(options...) cfg := metric.NewFloat64HistogramConfig(options...)
id := instID{ id := instID{
name: name, name: name,
kind: reflect.TypeOf((*sfHistogram)(nil)), kind: reflect.TypeFor[*sfHistogram](),
description: cfg.Description(), description: cfg.Description(),
unit: cfg.Unit(), unit: cfg.Unit(),
} }
@ -387,7 +405,7 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption)
cfg := metric.NewFloat64GaugeConfig(options...) cfg := metric.NewFloat64GaugeConfig(options...)
id := instID{ id := instID{
name: name, name: name,
kind: reflect.TypeOf((*sfGauge)(nil)), kind: reflect.TypeFor[*sfGauge](),
description: cfg.Description(), description: cfg.Description(),
unit: cfg.Unit(), unit: cfg.Unit(),
} }
@ -399,7 +417,10 @@ func (m *meter) Float64Gauge(name string, options ...metric.Float64GaugeOption)
return i, nil return i, nil
} }
func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64ObservableCounterOption) (metric.Float64ObservableCounter, error) { func (m *meter) Float64ObservableCounter(
name string,
options ...metric.Float64ObservableCounterOption,
) (metric.Float64ObservableCounter, error) {
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
@ -410,7 +431,7 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O
cfg := metric.NewFloat64ObservableCounterConfig(options...) cfg := metric.NewFloat64ObservableCounterConfig(options...)
id := instID{ id := instID{
name: name, name: name,
kind: reflect.TypeOf((*afCounter)(nil)), kind: reflect.TypeFor[*afCounter](),
description: cfg.Description(), description: cfg.Description(),
unit: cfg.Unit(), unit: cfg.Unit(),
} }
@ -422,7 +443,10 @@ func (m *meter) Float64ObservableCounter(name string, options ...metric.Float64O
return i, nil return i, nil
} }
func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Float64ObservableUpDownCounterOption) (metric.Float64ObservableUpDownCounter, error) { func (m *meter) Float64ObservableUpDownCounter(
name string,
options ...metric.Float64ObservableUpDownCounterOption,
) (metric.Float64ObservableUpDownCounter, error) {
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
@ -433,7 +457,7 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl
cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...) cfg := metric.NewFloat64ObservableUpDownCounterConfig(options...)
id := instID{ id := instID{
name: name, name: name,
kind: reflect.TypeOf((*afUpDownCounter)(nil)), kind: reflect.TypeFor[*afUpDownCounter](),
description: cfg.Description(), description: cfg.Description(),
unit: cfg.Unit(), unit: cfg.Unit(),
} }
@ -445,7 +469,10 @@ func (m *meter) Float64ObservableUpDownCounter(name string, options ...metric.Fl
return i, nil return i, nil
} }
func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64ObservableGaugeOption) (metric.Float64ObservableGauge, error) { func (m *meter) Float64ObservableGauge(
name string,
options ...metric.Float64ObservableGaugeOption,
) (metric.Float64ObservableGauge, error) {
m.mtx.Lock() m.mtx.Lock()
defer m.mtx.Unlock() defer m.mtx.Unlock()
@ -456,7 +483,7 @@ func (m *meter) Float64ObservableGauge(name string, options ...metric.Float64Obs
cfg := metric.NewFloat64ObservableGaugeConfig(options...) cfg := metric.NewFloat64ObservableGaugeConfig(options...)
id := instID{ id := instID{
name: name, name: name,
kind: reflect.TypeOf((*afGauge)(nil)), kind: reflect.TypeFor[*afGauge](),
description: cfg.Description(), description: cfg.Description(),
unit: cfg.Unit(), unit: cfg.Unit(),
} }

View File

@ -26,6 +26,7 @@ import (
"sync/atomic" "sync/atomic"
"go.opentelemetry.io/auto/sdk" "go.opentelemetry.io/auto/sdk"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes" "go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
@ -158,7 +159,18 @@ func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStart
// a nonRecordingSpan by default. // a nonRecordingSpan by default.
var autoInstEnabled = new(bool) var autoInstEnabled = new(bool)
func (t *tracer) newSpan(ctx context.Context, autoSpan *bool, name string, opts []trace.SpanStartOption) (context.Context, trace.Span) { // newSpan is called by tracer.Start so auto-instrumentation can attach an eBPF
// uprobe to this code.
//
// "noinline" pragma prevents the method from ever being inlined.
//
//go:noinline
func (t *tracer) newSpan(
ctx context.Context,
autoSpan *bool,
name string,
opts []trace.SpanStartOption,
) (context.Context, trace.Span) {
// autoInstEnabled is passed to newSpan via the autoSpan parameter. This is // autoInstEnabled is passed to newSpan via the autoSpan parameter. This is
// so the auto-instrumentation can define a uprobe for (*t).newSpan and be // so the auto-instrumentation can define a uprobe for (*t).newSpan and be
// provided with the address of the bool autoInstEnabled points to. It // provided with the address of the bool autoInstEnabled points to. It

View File

@ -1,48 +0,0 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
package internal // import "go.opentelemetry.io/otel/internal"
import (
"math"
"unsafe"
)
func BoolToRaw(b bool) uint64 { // nolint:revive // b is not a control flag.
if b {
return 1
}
return 0
}
func RawToBool(r uint64) bool {
return r != 0
}
func Int64ToRaw(i int64) uint64 {
// Assumes original was a valid int64 (overflow not checked).
return uint64(i) // nolint: gosec
}
func RawToInt64(r uint64) int64 {
// Assumes original was a valid int64 (overflow not checked).
return int64(r) // nolint: gosec
}
func Float64ToRaw(f float64) uint64 {
return math.Float64bits(f)
}
func RawToFloat64(r uint64) float64 {
return math.Float64frombits(r)
}
func RawPtrToFloat64Ptr(r *uint64) *float64 {
// Assumes original was a valid *float64 (overflow not checked).
return (*float64)(unsafe.Pointer(r)) // nolint: gosec
}
func RawPtrToInt64Ptr(r *uint64) *int64 {
// Assumes original was a valid *int64 (overflow not checked).
return (*int64)(unsafe.Pointer(r)) // nolint: gosec
}

View File

@ -11,7 +11,7 @@ import (
// Meter returns a Meter from the global MeterProvider. The name must be the // Meter returns a Meter from the global MeterProvider. The name must be the
// name of the library providing instrumentation. This name may be the same as // name of the library providing instrumentation. This name may be the same as
// the instrumented code only if that code provides built-in instrumentation. // the instrumented code only if that code provides built-in instrumentation.
// If the name is empty, then a implementation defined default name will be // If the name is empty, then an implementation defined default name will be
// used instead. // used instead.
// //
// If this is called before a global MeterProvider is registered the returned // If this is called before a global MeterProvider is registered the returned

View File

@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
--------------------------------------------------------------------------------
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -106,7 +106,9 @@ type Float64ObservableUpDownCounterConfig struct {
// NewFloat64ObservableUpDownCounterConfig returns a new // NewFloat64ObservableUpDownCounterConfig returns a new
// [Float64ObservableUpDownCounterConfig] with all opts applied. // [Float64ObservableUpDownCounterConfig] with all opts applied.
func NewFloat64ObservableUpDownCounterConfig(opts ...Float64ObservableUpDownCounterOption) Float64ObservableUpDownCounterConfig { func NewFloat64ObservableUpDownCounterConfig(
opts ...Float64ObservableUpDownCounterOption,
) Float64ObservableUpDownCounterConfig {
var config Float64ObservableUpDownCounterConfig var config Float64ObservableUpDownCounterConfig
for _, o := range opts { for _, o := range opts {
config = o.applyFloat64ObservableUpDownCounter(config) config = o.applyFloat64ObservableUpDownCounter(config)
@ -225,7 +227,11 @@ type Float64Observer interface {
// attributes as another Float64Callbacks also registered for the same // attributes as another Float64Callbacks also registered for the same
// instrument. // instrument.
// //
// The function needs to be concurrent safe. // The function needs to be reentrant and concurrent safe.
//
// Note that Go's mutexes are not reentrant, and locking a mutex takes
// an indefinite amount of time. It is therefore advised to avoid
// using mutexes inside callbacks.
type Float64Callback func(context.Context, Float64Observer) error type Float64Callback func(context.Context, Float64Observer) error
// Float64ObservableOption applies options to float64 Observer instruments. // Float64ObservableOption applies options to float64 Observer instruments.
@ -239,12 +245,16 @@ type float64CallbackOpt struct {
cback Float64Callback cback Float64Callback
} }
func (o float64CallbackOpt) applyFloat64ObservableCounter(cfg Float64ObservableCounterConfig) Float64ObservableCounterConfig { func (o float64CallbackOpt) applyFloat64ObservableCounter(
cfg Float64ObservableCounterConfig,
) Float64ObservableCounterConfig {
cfg.callbacks = append(cfg.callbacks, o.cback) cfg.callbacks = append(cfg.callbacks, o.cback)
return cfg return cfg
} }
func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(cfg Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { func (o float64CallbackOpt) applyFloat64ObservableUpDownCounter(
cfg Float64ObservableUpDownCounterConfig,
) Float64ObservableUpDownCounterConfig {
cfg.callbacks = append(cfg.callbacks, o.cback) cfg.callbacks = append(cfg.callbacks, o.cback)
return cfg return cfg
} }

View File

@ -105,7 +105,9 @@ type Int64ObservableUpDownCounterConfig struct {
// NewInt64ObservableUpDownCounterConfig returns a new // NewInt64ObservableUpDownCounterConfig returns a new
// [Int64ObservableUpDownCounterConfig] with all opts applied. // [Int64ObservableUpDownCounterConfig] with all opts applied.
func NewInt64ObservableUpDownCounterConfig(opts ...Int64ObservableUpDownCounterOption) Int64ObservableUpDownCounterConfig { func NewInt64ObservableUpDownCounterConfig(
opts ...Int64ObservableUpDownCounterOption,
) Int64ObservableUpDownCounterConfig {
var config Int64ObservableUpDownCounterConfig var config Int64ObservableUpDownCounterConfig
for _, o := range opts { for _, o := range opts {
config = o.applyInt64ObservableUpDownCounter(config) config = o.applyInt64ObservableUpDownCounter(config)
@ -223,7 +225,11 @@ type Int64Observer interface {
// attributes as another Int64Callbacks also registered for the same // attributes as another Int64Callbacks also registered for the same
// instrument. // instrument.
// //
// The function needs to be concurrent safe. // The function needs to be reentrant and concurrent safe.
//
// Note that Go's mutexes are not reentrant, and locking a mutex takes
// an indefinite amount of time. It is therefore advised to avoid
// using mutexes inside callbacks.
type Int64Callback func(context.Context, Int64Observer) error type Int64Callback func(context.Context, Int64Observer) error
// Int64ObservableOption applies options to int64 Observer instruments. // Int64ObservableOption applies options to int64 Observer instruments.
@ -242,7 +248,9 @@ func (o int64CallbackOpt) applyInt64ObservableCounter(cfg Int64ObservableCounter
return cfg return cfg
} }
func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(cfg Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { func (o int64CallbackOpt) applyInt64ObservableUpDownCounter(
cfg Int64ObservableUpDownCounterConfig,
) Int64ObservableUpDownCounterConfig {
cfg.callbacks = append(cfg.callbacks, o.cback) cfg.callbacks = append(cfg.callbacks, o.cback)
return cfg return cfg
} }

View File

@ -3,7 +3,11 @@
package metric // import "go.opentelemetry.io/otel/metric" package metric // import "go.opentelemetry.io/otel/metric"
import "go.opentelemetry.io/otel/attribute" import (
"slices"
"go.opentelemetry.io/otel/attribute"
)
// MeterConfig contains options for Meters. // MeterConfig contains options for Meters.
type MeterConfig struct { type MeterConfig struct {
@ -62,12 +66,38 @@ func WithInstrumentationVersion(version string) MeterOption {
}) })
} }
// WithInstrumentationAttributes sets the instrumentation attributes. // WithInstrumentationAttributes adds the instrumentation attributes.
// //
// The passed attributes will be de-duplicated. // This is equivalent to calling [WithInstrumentationAttributeSet] with an
// [attribute.Set] created from a clone of the passed attributes.
// [WithInstrumentationAttributeSet] is recommended for more control.
//
// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
// options are passed, the attributes will be merged together in the order
// they are passed. Attributes with duplicate keys will use the last value passed.
func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption { func WithInstrumentationAttributes(attr ...attribute.KeyValue) MeterOption {
set := attribute.NewSet(slices.Clone(attr)...)
return WithInstrumentationAttributeSet(set)
}
// WithInstrumentationAttributeSet adds the instrumentation attributes.
//
// If multiple [WithInstrumentationAttributes] or [WithInstrumentationAttributeSet]
// options are passed, the attributes will be merged together in the order
// they are passed. Attributes with duplicate keys will use the last value passed.
func WithInstrumentationAttributeSet(set attribute.Set) MeterOption {
if set.Len() == 0 {
return meterOptionFunc(func(config MeterConfig) MeterConfig {
return config
})
}
return meterOptionFunc(func(config MeterConfig) MeterConfig { return meterOptionFunc(func(config MeterConfig) MeterConfig {
config.attrs = attribute.NewSet(attr...) if config.attrs.Len() == 0 {
config.attrs = set
} else {
config.attrs = mergeSets(config.attrs, set)
}
return config return config
}) })
} }

View File

@ -63,7 +63,9 @@ func (o descOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig)
return c return c
} }
func (o descOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { func (o descOpt) applyFloat64ObservableUpDownCounter(
c Float64ObservableUpDownCounterConfig,
) Float64ObservableUpDownCounterConfig {
c.description = string(o) c.description = string(o)
return c return c
} }
@ -98,7 +100,9 @@ func (o descOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int
return c return c
} }
func (o descOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { func (o descOpt) applyInt64ObservableUpDownCounter(
c Int64ObservableUpDownCounterConfig,
) Int64ObservableUpDownCounterConfig {
c.description = string(o) c.description = string(o)
return c return c
} }
@ -138,7 +142,9 @@ func (o unitOpt) applyFloat64ObservableCounter(c Float64ObservableCounterConfig)
return c return c
} }
func (o unitOpt) applyFloat64ObservableUpDownCounter(c Float64ObservableUpDownCounterConfig) Float64ObservableUpDownCounterConfig { func (o unitOpt) applyFloat64ObservableUpDownCounter(
c Float64ObservableUpDownCounterConfig,
) Float64ObservableUpDownCounterConfig {
c.unit = string(o) c.unit = string(o)
return c return c
} }
@ -173,7 +179,9 @@ func (o unitOpt) applyInt64ObservableCounter(c Int64ObservableCounterConfig) Int
return c return c
} }
func (o unitOpt) applyInt64ObservableUpDownCounter(c Int64ObservableUpDownCounterConfig) Int64ObservableUpDownCounterConfig { func (o unitOpt) applyInt64ObservableUpDownCounter(
c Int64ObservableUpDownCounterConfig,
) Int64ObservableUpDownCounterConfig {
c.unit = string(o) c.unit = string(o)
return c return c
} }

View File

@ -110,7 +110,10 @@ type Meter interface {
// The name needs to conform to the OpenTelemetry instrument name syntax. // The name needs to conform to the OpenTelemetry instrument name syntax.
// See the Instrument Name section of the package documentation for more // See the Instrument Name section of the package documentation for more
// information. // information.
Int64ObservableUpDownCounter(name string, options ...Int64ObservableUpDownCounterOption) (Int64ObservableUpDownCounter, error) Int64ObservableUpDownCounter(
name string,
options ...Int64ObservableUpDownCounterOption,
) (Int64ObservableUpDownCounter, error)
// Int64ObservableGauge returns a new Int64ObservableGauge instrument // Int64ObservableGauge returns a new Int64ObservableGauge instrument
// identified by name and configured with options. The instrument is used // identified by name and configured with options. The instrument is used
@ -194,7 +197,10 @@ type Meter interface {
// The name needs to conform to the OpenTelemetry instrument name syntax. // The name needs to conform to the OpenTelemetry instrument name syntax.
// See the Instrument Name section of the package documentation for more // See the Instrument Name section of the package documentation for more
// information. // information.
Float64ObservableUpDownCounter(name string, options ...Float64ObservableUpDownCounterOption) (Float64ObservableUpDownCounter, error) Float64ObservableUpDownCounter(
name string,
options ...Float64ObservableUpDownCounterOption,
) (Float64ObservableUpDownCounter, error)
// Float64ObservableGauge returns a new Float64ObservableGauge instrument // Float64ObservableGauge returns a new Float64ObservableGauge instrument
// identified by name and configured with options. The instrument is used // identified by name and configured with options. The instrument is used
@ -238,7 +244,11 @@ type Meter interface {
// Callbacks. Meaning, it should not report measurements for an instrument with // Callbacks. Meaning, it should not report measurements for an instrument with
// the same attributes as another Callback will report. // the same attributes as another Callback will report.
// //
// The function needs to be concurrent safe. // The function needs to be reentrant and concurrent safe.
//
// Note that Go's mutexes are not reentrant, and locking a mutex takes
// an indefinite amount of time. It is therefore advised to avoid
// using mutexes inside callbacks.
type Callback func(context.Context, Observer) error type Callback func(context.Context, Observer) error
// Observer records measurements for multiple instruments in a Callback. // Observer records measurements for multiple instruments in a Callback.

View File

@ -0,0 +1,3 @@
# Metric Noop
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/metric/noop)](https://pkg.go.dev/go.opentelemetry.io/otel/metric/noop)

320
vendor/go.opentelemetry.io/otel/metric/noop/noop.go generated vendored Normal file
View File

@ -0,0 +1,320 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package noop provides an implementation of the OpenTelemetry metric API that
// produces no telemetry and minimizes used computation resources.
//
// Using this package to implement the OpenTelemetry metric API will
// effectively disable OpenTelemetry.
//
// This implementation can be embedded in other implementations of the
// OpenTelemetry metric API. Doing so will mean the implementation defaults to
// no operation for methods it does not implement.
package noop // import "go.opentelemetry.io/otel/metric/noop"
import (
"context"
"go.opentelemetry.io/otel/metric"
"go.opentelemetry.io/otel/metric/embedded"
)
var (
// Compile-time check this implements the OpenTelemetry API.
_ metric.MeterProvider = MeterProvider{}
_ metric.Meter = Meter{}
_ metric.Observer = Observer{}
_ metric.Registration = Registration{}
_ metric.Int64Counter = Int64Counter{}
_ metric.Float64Counter = Float64Counter{}
_ metric.Int64UpDownCounter = Int64UpDownCounter{}
_ metric.Float64UpDownCounter = Float64UpDownCounter{}
_ metric.Int64Histogram = Int64Histogram{}
_ metric.Float64Histogram = Float64Histogram{}
_ metric.Int64Gauge = Int64Gauge{}
_ metric.Float64Gauge = Float64Gauge{}
_ metric.Int64ObservableCounter = Int64ObservableCounter{}
_ metric.Float64ObservableCounter = Float64ObservableCounter{}
_ metric.Int64ObservableGauge = Int64ObservableGauge{}
_ metric.Float64ObservableGauge = Float64ObservableGauge{}
_ metric.Int64ObservableUpDownCounter = Int64ObservableUpDownCounter{}
_ metric.Float64ObservableUpDownCounter = Float64ObservableUpDownCounter{}
_ metric.Int64Observer = Int64Observer{}
_ metric.Float64Observer = Float64Observer{}
)
// MeterProvider is an OpenTelemetry No-Op MeterProvider.
type MeterProvider struct{ embedded.MeterProvider }
// NewMeterProvider returns a MeterProvider that does not record any telemetry.
func NewMeterProvider() MeterProvider {
return MeterProvider{}
}
// Meter returns an OpenTelemetry Meter that does not record any telemetry.
func (MeterProvider) Meter(string, ...metric.MeterOption) metric.Meter {
return Meter{}
}
// Meter is an OpenTelemetry No-Op Meter.
type Meter struct{ embedded.Meter }
// Int64Counter returns a Counter used to record int64 measurements that
// produces no telemetry.
func (Meter) Int64Counter(string, ...metric.Int64CounterOption) (metric.Int64Counter, error) {
return Int64Counter{}, nil
}
// Int64UpDownCounter returns an UpDownCounter used to record int64
// measurements that produces no telemetry.
func (Meter) Int64UpDownCounter(string, ...metric.Int64UpDownCounterOption) (metric.Int64UpDownCounter, error) {
return Int64UpDownCounter{}, nil
}
// Int64Histogram returns a Histogram used to record int64 measurements that
// produces no telemetry.
func (Meter) Int64Histogram(string, ...metric.Int64HistogramOption) (metric.Int64Histogram, error) {
return Int64Histogram{}, nil
}
// Int64Gauge returns a Gauge used to record int64 measurements that
// produces no telemetry.
func (Meter) Int64Gauge(string, ...metric.Int64GaugeOption) (metric.Int64Gauge, error) {
return Int64Gauge{}, nil
}
// Int64ObservableCounter returns an ObservableCounter used to record int64
// measurements that produces no telemetry.
func (Meter) Int64ObservableCounter(
string,
...metric.Int64ObservableCounterOption,
) (metric.Int64ObservableCounter, error) {
return Int64ObservableCounter{}, nil
}
// Int64ObservableUpDownCounter returns an ObservableUpDownCounter used to
// record int64 measurements that produces no telemetry.
func (Meter) Int64ObservableUpDownCounter(
string,
...metric.Int64ObservableUpDownCounterOption,
) (metric.Int64ObservableUpDownCounter, error) {
return Int64ObservableUpDownCounter{}, nil
}
// Int64ObservableGauge returns an ObservableGauge used to record int64
// measurements that produces no telemetry.
func (Meter) Int64ObservableGauge(string, ...metric.Int64ObservableGaugeOption) (metric.Int64ObservableGauge, error) {
return Int64ObservableGauge{}, nil
}
// Float64Counter returns a Counter used to record int64 measurements that
// produces no telemetry.
func (Meter) Float64Counter(string, ...metric.Float64CounterOption) (metric.Float64Counter, error) {
return Float64Counter{}, nil
}
// Float64UpDownCounter returns an UpDownCounter used to record int64
// measurements that produces no telemetry.
func (Meter) Float64UpDownCounter(string, ...metric.Float64UpDownCounterOption) (metric.Float64UpDownCounter, error) {
return Float64UpDownCounter{}, nil
}
// Float64Histogram returns a Histogram used to record int64 measurements that
// produces no telemetry.
func (Meter) Float64Histogram(string, ...metric.Float64HistogramOption) (metric.Float64Histogram, error) {
return Float64Histogram{}, nil
}
// Float64Gauge returns a Gauge used to record float64 measurements that
// produces no telemetry.
func (Meter) Float64Gauge(string, ...metric.Float64GaugeOption) (metric.Float64Gauge, error) {
return Float64Gauge{}, nil
}
// Float64ObservableCounter returns an ObservableCounter used to record int64
// measurements that produces no telemetry.
func (Meter) Float64ObservableCounter(
string,
...metric.Float64ObservableCounterOption,
) (metric.Float64ObservableCounter, error) {
return Float64ObservableCounter{}, nil
}
// Float64ObservableUpDownCounter returns an ObservableUpDownCounter used to
// record int64 measurements that produces no telemetry.
func (Meter) Float64ObservableUpDownCounter(
string,
...metric.Float64ObservableUpDownCounterOption,
) (metric.Float64ObservableUpDownCounter, error) {
return Float64ObservableUpDownCounter{}, nil
}
// Float64ObservableGauge returns an ObservableGauge used to record int64
// measurements that produces no telemetry.
func (Meter) Float64ObservableGauge(
string,
...metric.Float64ObservableGaugeOption,
) (metric.Float64ObservableGauge, error) {
return Float64ObservableGauge{}, nil
}
// RegisterCallback performs no operation.
func (Meter) RegisterCallback(metric.Callback, ...metric.Observable) (metric.Registration, error) {
return Registration{}, nil
}
// Observer acts as a recorder of measurements for multiple instruments in a
// Callback, it performing no operation.
type Observer struct{ embedded.Observer }
// ObserveFloat64 performs no operation.
func (Observer) ObserveFloat64(metric.Float64Observable, float64, ...metric.ObserveOption) {
}
// ObserveInt64 performs no operation.
func (Observer) ObserveInt64(metric.Int64Observable, int64, ...metric.ObserveOption) {
}
// Registration is the registration of a Callback with a No-Op Meter.
type Registration struct{ embedded.Registration }
// Unregister unregisters the Callback the Registration represents with the
// No-Op Meter. This will always return nil because the No-Op Meter performs no
// operation, including hold any record of registrations.
func (Registration) Unregister() error { return nil }
// Int64Counter is an OpenTelemetry Counter used to record int64 measurements.
// It produces no telemetry.
type Int64Counter struct{ embedded.Int64Counter }
// Add performs no operation.
func (Int64Counter) Add(context.Context, int64, ...metric.AddOption) {}
// Enabled performs no operation.
func (Int64Counter) Enabled(context.Context) bool { return false }
// Float64Counter is an OpenTelemetry Counter used to record float64
// measurements. It produces no telemetry.
type Float64Counter struct{ embedded.Float64Counter }
// Add performs no operation.
func (Float64Counter) Add(context.Context, float64, ...metric.AddOption) {}
// Enabled performs no operation.
func (Float64Counter) Enabled(context.Context) bool { return false }
// Int64UpDownCounter is an OpenTelemetry UpDownCounter used to record int64
// measurements. It produces no telemetry.
type Int64UpDownCounter struct{ embedded.Int64UpDownCounter }
// Add performs no operation.
func (Int64UpDownCounter) Add(context.Context, int64, ...metric.AddOption) {}
// Enabled performs no operation.
func (Int64UpDownCounter) Enabled(context.Context) bool { return false }
// Float64UpDownCounter is an OpenTelemetry UpDownCounter used to record
// float64 measurements. It produces no telemetry.
type Float64UpDownCounter struct{ embedded.Float64UpDownCounter }
// Add performs no operation.
func (Float64UpDownCounter) Add(context.Context, float64, ...metric.AddOption) {}
// Enabled performs no operation.
func (Float64UpDownCounter) Enabled(context.Context) bool { return false }
// Int64Histogram is an OpenTelemetry Histogram used to record int64
// measurements. It produces no telemetry.
type Int64Histogram struct{ embedded.Int64Histogram }
// Record performs no operation.
func (Int64Histogram) Record(context.Context, int64, ...metric.RecordOption) {}
// Enabled performs no operation.
func (Int64Histogram) Enabled(context.Context) bool { return false }
// Float64Histogram is an OpenTelemetry Histogram used to record float64
// measurements. It produces no telemetry.
type Float64Histogram struct{ embedded.Float64Histogram }
// Record performs no operation.
func (Float64Histogram) Record(context.Context, float64, ...metric.RecordOption) {}
// Enabled performs no operation.
func (Float64Histogram) Enabled(context.Context) bool { return false }
// Int64Gauge is an OpenTelemetry Gauge used to record instantaneous int64
// measurements. It produces no telemetry.
type Int64Gauge struct{ embedded.Int64Gauge }
// Record performs no operation.
func (Int64Gauge) Record(context.Context, int64, ...metric.RecordOption) {}
// Enabled performs no operation.
func (Int64Gauge) Enabled(context.Context) bool { return false }
// Float64Gauge is an OpenTelemetry Gauge used to record instantaneous float64
// measurements. It produces no telemetry.
type Float64Gauge struct{ embedded.Float64Gauge }
// Record performs no operation.
func (Float64Gauge) Record(context.Context, float64, ...metric.RecordOption) {}
// Enabled performs no operation.
func (Float64Gauge) Enabled(context.Context) bool { return false }
// Int64ObservableCounter is an OpenTelemetry ObservableCounter used to record
// int64 measurements. It produces no telemetry.
type Int64ObservableCounter struct {
metric.Int64Observable
embedded.Int64ObservableCounter
}
// Float64ObservableCounter is an OpenTelemetry ObservableCounter used to record
// float64 measurements. It produces no telemetry.
type Float64ObservableCounter struct {
metric.Float64Observable
embedded.Float64ObservableCounter
}
// Int64ObservableGauge is an OpenTelemetry ObservableGauge used to record
// int64 measurements. It produces no telemetry.
type Int64ObservableGauge struct {
metric.Int64Observable
embedded.Int64ObservableGauge
}
// Float64ObservableGauge is an OpenTelemetry ObservableGauge used to record
// float64 measurements. It produces no telemetry.
type Float64ObservableGauge struct {
metric.Float64Observable
embedded.Float64ObservableGauge
}
// Int64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
// used to record int64 measurements. It produces no telemetry.
//
// The embedded fields satisfy the metric API's observable interfaces.
type Int64ObservableUpDownCounter struct {
	metric.Int64Observable
	embedded.Int64ObservableUpDownCounter
}
// Float64ObservableUpDownCounter is an OpenTelemetry ObservableUpDownCounter
// used to record float64 measurements. It produces no telemetry.
//
// The embedded fields satisfy the metric API's observable interfaces.
type Float64ObservableUpDownCounter struct {
	metric.Float64Observable
	embedded.Float64ObservableUpDownCounter
}
// Int64Observer is a recorder of int64 measurements that performs no
// operation.
type Int64Observer struct{ embedded.Int64Observer }

// Observe performs no operation; the observed value and all options are
// discarded.
func (Int64Observer) Observe(_ int64, _ ...metric.ObserveOption) {}
// Float64Observer is a recorder of float64 measurements that performs no
// operation.
type Float64Observer struct{ embedded.Float64Observer }

// Observe performs no operation; the observed value and all options are
// discarded.
func (Float64Observer) Observe(_ float64, _ ...metric.ObserveOption) {}

View File

@ -25,6 +25,12 @@ type Float64Counter interface {
// Use the WithAttributeSet (or, if performance is not a concern, // Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes. // the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr float64, options ...AddOption) Add(ctx context.Context, incr float64, options ...AddOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
} }
// Float64CounterConfig contains options for synchronous counter instruments that // Float64CounterConfig contains options for synchronous counter instruments that
@ -78,6 +84,12 @@ type Float64UpDownCounter interface {
// Use the WithAttributeSet (or, if performance is not a concern, // Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes. // the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr float64, options ...AddOption) Add(ctx context.Context, incr float64, options ...AddOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
} }
// Float64UpDownCounterConfig contains options for synchronous counter // Float64UpDownCounterConfig contains options for synchronous counter
@ -131,6 +143,12 @@ type Float64Histogram interface {
// Use the WithAttributeSet (or, if performance is not a concern, // Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes. // the WithAttributes) option to include measurement attributes.
Record(ctx context.Context, incr float64, options ...RecordOption) Record(ctx context.Context, incr float64, options ...RecordOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
} }
// Float64HistogramConfig contains options for synchronous histogram // Float64HistogramConfig contains options for synchronous histogram
@ -189,6 +207,12 @@ type Float64Gauge interface {
// Use the WithAttributeSet (or, if performance is not a concern, // Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes. // the WithAttributes) option to include measurement attributes.
Record(ctx context.Context, value float64, options ...RecordOption) Record(ctx context.Context, value float64, options ...RecordOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
} }
// Float64GaugeConfig contains options for synchronous gauge instruments that // Float64GaugeConfig contains options for synchronous gauge instruments that

View File

@ -25,6 +25,12 @@ type Int64Counter interface {
// Use the WithAttributeSet (or, if performance is not a concern, // Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes. // the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr int64, options ...AddOption) Add(ctx context.Context, incr int64, options ...AddOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
} }
// Int64CounterConfig contains options for synchronous counter instruments that // Int64CounterConfig contains options for synchronous counter instruments that
@ -78,6 +84,12 @@ type Int64UpDownCounter interface {
// Use the WithAttributeSet (or, if performance is not a concern, // Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes. // the WithAttributes) option to include measurement attributes.
Add(ctx context.Context, incr int64, options ...AddOption) Add(ctx context.Context, incr int64, options ...AddOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
} }
// Int64UpDownCounterConfig contains options for synchronous counter // Int64UpDownCounterConfig contains options for synchronous counter
@ -131,6 +143,12 @@ type Int64Histogram interface {
// Use the WithAttributeSet (or, if performance is not a concern, // Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes. // the WithAttributes) option to include measurement attributes.
Record(ctx context.Context, incr int64, options ...RecordOption) Record(ctx context.Context, incr int64, options ...RecordOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
} }
// Int64HistogramConfig contains options for synchronous histogram instruments // Int64HistogramConfig contains options for synchronous histogram instruments
@ -189,6 +207,12 @@ type Int64Gauge interface {
// Use the WithAttributeSet (or, if performance is not a concern, // Use the WithAttributeSet (or, if performance is not a concern,
// the WithAttributes) option to include measurement attributes. // the WithAttributes) option to include measurement attributes.
Record(ctx context.Context, value int64, options ...RecordOption) Record(ctx context.Context, value int64, options ...RecordOption)
// Enabled reports whether the instrument will process measurements for the given context.
//
// This function can be used in places where measuring an instrument
// would result in computationally expensive operations.
Enabled(context.Context) bool
} }
// Int64GaugeConfig contains options for synchronous gauge instruments that // Int64GaugeConfig contains options for synchronous gauge instruments that

View File

@ -20,7 +20,7 @@ type Baggage struct{}
var _ TextMapPropagator = Baggage{} var _ TextMapPropagator = Baggage{}
// Inject sets baggage key-values from ctx into the carrier. // Inject sets baggage key-values from ctx into the carrier.
func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) { func (Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
bStr := baggage.FromContext(ctx).String() bStr := baggage.FromContext(ctx).String()
if bStr != "" { if bStr != "" {
carrier.Set(baggageHeader, bStr) carrier.Set(baggageHeader, bStr)
@ -28,7 +28,21 @@ func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
} }
// Extract returns a copy of parent with the baggage from the carrier added. // Extract returns a copy of parent with the baggage from the carrier added.
func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context { // If carrier implements [ValuesGetter] (e.g. [HeaderCarrier]), Values is invoked
// for multiple values extraction. Otherwise, Get is called.
func (Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
if multiCarrier, ok := carrier.(ValuesGetter); ok {
return extractMultiBaggage(parent, multiCarrier)
}
return extractSingleBaggage(parent, carrier)
}
// Fields returns the keys who's values are set with Inject.
func (Baggage) Fields() []string {
return []string{baggageHeader}
}
func extractSingleBaggage(parent context.Context, carrier TextMapCarrier) context.Context {
bStr := carrier.Get(baggageHeader) bStr := carrier.Get(baggageHeader)
if bStr == "" { if bStr == "" {
return parent return parent
@ -41,7 +55,23 @@ func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context
return baggage.ContextWithBaggage(parent, bag) return baggage.ContextWithBaggage(parent, bag)
} }
// Fields returns the keys who's values are set with Inject. func extractMultiBaggage(parent context.Context, carrier ValuesGetter) context.Context {
func (b Baggage) Fields() []string { bVals := carrier.Values(baggageHeader)
return []string{baggageHeader} if len(bVals) == 0 {
return parent
}
var members []baggage.Member
for _, bStr := range bVals {
currBag, err := baggage.Parse(bStr)
if err != nil {
continue
}
members = append(members, currBag.Members()...)
}
b, err := baggage.New(members...)
if err != nil || b.Len() == 0 {
return parent
}
return baggage.ContextWithBaggage(parent, b)
} }

View File

@ -9,6 +9,7 @@ import (
) )
// TextMapCarrier is the storage medium used by a TextMapPropagator. // TextMapCarrier is the storage medium used by a TextMapPropagator.
// See ValuesGetter for how a TextMapCarrier can get multiple values for a key.
type TextMapCarrier interface { type TextMapCarrier interface {
// DO NOT CHANGE: any modification will not be backwards compatible and // DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release. // must never be done outside of a new major release.
@ -19,7 +20,7 @@ type TextMapCarrier interface {
// must never be done outside of a new major release. // must never be done outside of a new major release.
// Set stores the key-value pair. // Set stores the key-value pair.
Set(key string, value string) Set(key, value string)
// DO NOT CHANGE: any modification will not be backwards compatible and // DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release. // must never be done outside of a new major release.
@ -29,6 +30,18 @@ type TextMapCarrier interface {
// must never be done outside of a new major release. // must never be done outside of a new major release.
} }
// ValuesGetter can return multiple values for a single key,
// with contrast to TextMapCarrier.Get which returns a single value.
type ValuesGetter interface {
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
// Values returns all values associated with the passed key.
Values(key string) []string
// DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release.
}
// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage // MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
// medium for propagated key-value pairs. // medium for propagated key-value pairs.
type MapCarrier map[string]string type MapCarrier map[string]string
@ -55,16 +68,27 @@ func (c MapCarrier) Keys() []string {
return keys return keys
} }
// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface. // HeaderCarrier adapts http.Header to satisfy the TextMapCarrier and ValuesGetter interfaces.
type HeaderCarrier http.Header type HeaderCarrier http.Header
// Get returns the value associated with the passed key. // Compile time check that HeaderCarrier implements ValuesGetter.
var _ TextMapCarrier = HeaderCarrier{}
// Compile time check that HeaderCarrier implements TextMapCarrier.
var _ ValuesGetter = HeaderCarrier{}
// Get returns the first value associated with the passed key.
func (hc HeaderCarrier) Get(key string) string { func (hc HeaderCarrier) Get(key string) string {
return http.Header(hc).Get(key) return http.Header(hc).Get(key)
} }
// Values returns all values associated with the passed key.
func (hc HeaderCarrier) Values(key string) []string {
return http.Header(hc).Values(key)
}
// Set stores the key-value pair. // Set stores the key-value pair.
func (hc HeaderCarrier) Set(key string, value string) { func (hc HeaderCarrier) Set(key, value string) {
http.Header(hc).Set(key, value) http.Header(hc).Set(key, value)
} }
@ -89,6 +113,8 @@ type TextMapPropagator interface {
// must never be done outside of a new major release. // must never be done outside of a new major release.
// Extract reads cross-cutting concerns from the carrier into a Context. // Extract reads cross-cutting concerns from the carrier into a Context.
// Implementations may check if the carrier implements ValuesGetter,
// to support extraction of multiple values per key.
Extract(ctx context.Context, carrier TextMapCarrier) context.Context Extract(ctx context.Context, carrier TextMapCarrier) context.Context
// DO NOT CHANGE: any modification will not be backwards compatible and // DO NOT CHANGE: any modification will not be backwards compatible and
// must never be done outside of a new major release. // must never be done outside of a new major release.

View File

@ -36,7 +36,7 @@ var (
) )
// Inject injects the trace context from ctx into carrier. // Inject injects the trace context from ctx into carrier.
func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) { func (TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
sc := trace.SpanContextFromContext(ctx) sc := trace.SpanContextFromContext(ctx)
if !sc.IsValid() { if !sc.IsValid() {
return return
@ -77,7 +77,7 @@ func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) cont
return trace.ContextWithRemoteSpanContext(ctx, sc) return trace.ContextWithRemoteSpanContext(ctx, sc)
} }
func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext { func (TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
h := carrier.Get(traceparentHeader) h := carrier.Get(traceparentHeader)
if h == "" { if h == "" {
return trace.SpanContext{} return trace.SpanContext{}
@ -111,7 +111,7 @@ func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
} }
// Clear all flags other than the trace-context supported sampling bit. // Clear all flags other than the trace-context supported sampling bit.
scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled // nolint:gosec // slice size already checked.
// Ignore the error returned here. Failure to parse tracestate MUST NOT // Ignore the error returned here. Failure to parse tracestate MUST NOT
// affect the parsing of traceparent according to the W3C tracecontext // affect the parsing of traceparent according to the W3C tracecontext
@ -151,6 +151,6 @@ func extractPart(dst []byte, h *string, n int) bool {
} }
// Fields returns the keys who's values are set with Inject. // Fields returns the keys who's values are set with Inject.
func (tc TraceContext) Fields() []string { func (TraceContext) Fields() []string {
return []string{traceparentHeader, tracestateHeader} return []string{traceparentHeader, tracestateHeader}
} }

View File

@ -1,7 +1,8 @@
{ {
"$schema": "https://docs.renovatebot.com/renovate-schema.json", "$schema": "https://docs.renovatebot.com/renovate-schema.json",
"extends": [ "extends": [
"config:best-practices" "config:best-practices",
"helpers:pinGitHubActionDigestsToSemver"
], ],
"ignorePaths": [], "ignorePaths": [],
"labels": ["Skip Changelog", "dependencies"], "labels": ["Skip Changelog", "dependencies"],
@ -25,6 +26,10 @@
{ {
"matchPackageNames": ["golang.org/x/**"], "matchPackageNames": ["golang.org/x/**"],
"groupName": "golang.org/x" "groupName": "golang.org/x"
},
{
"matchPackageNames": ["go.opentelemetry.io/otel/sdk/log/logtest"],
"enabled": false
} }
] ]
} }

View File

@ -199,3 +199,33 @@
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and See the License for the specific language governing permissions and
limitations under the License. limitations under the License.
--------------------------------------------------------------------------------
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -0,0 +1,39 @@
// Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0
// Package x documents experimental features for [go.opentelemetry.io/otel/sdk].
package x // import "go.opentelemetry.io/otel/sdk/internal/x"
import "strings"
// Resource is an experimental feature flag that defines if resource detectors
// should be included experimental semantic conventions.
//
// To enable this feature set the OTEL_GO_X_RESOURCE environment variable
// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
// will also enable this).
var Resource = newFeature(
[]string{"RESOURCE"},
func(v string) (string, bool) {
if strings.EqualFold(v, "true") {
return v, true
}
return "", false
},
)
// Observability is an experimental feature flag that determines if SDK
// observability metrics are enabled.
//
// To enable this feature set the OTEL_GO_X_OBSERVABILITY environment variable
// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
// will also enable this).
var Observability = newFeature(
[]string{"OBSERVABILITY", "SELF_OBSERVABILITY"},
func(v string) (string, bool) {
if strings.EqualFold(v, "true") {
return v, true
}
return "", false
},
)

View File

@ -1,48 +1,38 @@
// Code generated by gotmpl. DO NOT MODIFY.
// source: internal/shared/x/x.go.tmpl
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
// Package x contains support for OTel SDK experimental features. // Package x documents experimental features for [go.opentelemetry.io/otel/sdk].
//
// This package should only be used for features defined in the specification.
// It should not be used for experiments or new project ideas.
package x // import "go.opentelemetry.io/otel/sdk/internal/x" package x // import "go.opentelemetry.io/otel/sdk/internal/x"
import ( import (
"os" "os"
"strings"
) )
// Resource is an experimental feature flag that defines if resource detectors
// should be included experimental semantic conventions.
//
// To enable this feature set the OTEL_GO_X_RESOURCE environment variable
// to the case-insensitive string value of "true" (i.e. "True" and "TRUE"
// will also enable this).
var Resource = newFeature("RESOURCE", func(v string) (string, bool) {
if strings.ToLower(v) == "true" {
return v, true
}
return "", false
})
// Feature is an experimental feature control flag. It provides a uniform way // Feature is an experimental feature control flag. It provides a uniform way
// to interact with these feature flags and parse their values. // to interact with these feature flags and parse their values.
type Feature[T any] struct { type Feature[T any] struct {
key string keys []string
parse func(v string) (T, bool) parse func(v string) (T, bool)
} }
func newFeature[T any](suffix string, parse func(string) (T, bool)) Feature[T] { func newFeature[T any](suffix []string, parse func(string) (T, bool)) Feature[T] {
const envKeyRoot = "OTEL_GO_X_" const envKeyRoot = "OTEL_GO_X_"
keys := make([]string, 0, len(suffix))
for _, s := range suffix {
keys = append(keys, envKeyRoot+s)
}
return Feature[T]{ return Feature[T]{
key: envKeyRoot + suffix, keys: keys,
parse: parse, parse: parse,
} }
} }
// Key returns the environment variable key that needs to be set to enable the // Keys returns the environment variable keys that can be set to enable the
// feature. // feature.
func (f Feature[T]) Key() string { return f.key } func (f Feature[T]) Keys() []string { return f.keys }
// Lookup returns the user configured value for the feature and true if the // Lookup returns the user configured value for the feature and true if the
// user has enabled the feature. Otherwise, if the feature is not enabled, a // user has enabled the feature. Otherwise, if the feature is not enabled, a
@ -52,14 +42,16 @@ func (f Feature[T]) Lookup() (v T, ok bool) {
// //
// > The SDK MUST interpret an empty value of an environment variable the // > The SDK MUST interpret an empty value of an environment variable the
// > same way as when the variable is unset. // > same way as when the variable is unset.
vRaw := os.Getenv(f.key) for _, key := range f.keys {
if vRaw == "" { vRaw := os.Getenv(key)
return v, ok if vRaw != "" {
return f.parse(vRaw)
}
} }
return f.parse(vRaw) return v, ok
} }
// Enabled returns if the feature is enabled. // Enabled reports whether the feature is enabled.
func (f Feature[T]) Enabled() bool { func (f Feature[T]) Enabled() bool {
_, ok := f.Lookup() _, ok := f.Lookup()
return ok return ok

View File

@ -13,7 +13,7 @@ import (
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk" "go.opentelemetry.io/otel/sdk"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0" semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
) )
type ( type (
@ -72,7 +72,7 @@ func StringDetector(schemaURL string, k attribute.Key, f func() (string, error))
// Detect returns a *Resource that describes the string as a value // Detect returns a *Resource that describes the string as a value
// corresponding to attribute.Key as well as the specific schemaURL. // corresponding to attribute.Key as well as the specific schemaURL.
func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { func (sd stringDetector) Detect(context.Context) (*Resource, error) {
value, err := sd.F() value, err := sd.F()
if err != nil { if err != nil {
return nil, fmt.Errorf("%s: %w", string(sd.K), err) return nil, fmt.Errorf("%s: %w", string(sd.K), err)

View File

@ -11,7 +11,7 @@ import (
"os" "os"
"regexp" "regexp"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0" semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
) )
type containerIDProvider func() (string, error) type containerIDProvider func() (string, error)
@ -27,7 +27,7 @@ const cgroupPath = "/proc/self/cgroup"
// Detect returns a *Resource that describes the id of the container. // Detect returns a *Resource that describes the id of the container.
// If no container id found, an empty resource will be returned. // If no container id found, an empty resource will be returned.
func (cgroupContainerIDDetector) Detect(ctx context.Context) (*Resource, error) { func (cgroupContainerIDDetector) Detect(context.Context) (*Resource, error) {
containerID, err := containerID() containerID, err := containerID()
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -12,7 +12,7 @@ import (
"go.opentelemetry.io/otel" "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0" semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
) )
const ( const (

View File

@ -8,7 +8,7 @@ import (
"errors" "errors"
"strings" "strings"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0" semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
) )
type hostIDProvider func() (string, error) type hostIDProvider func() (string, error)
@ -51,17 +51,16 @@ type hostIDReaderDarwin struct {
execCommand commandExecutor execCommand commandExecutor
} }
// read executes `ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id // read executes `/usr/sbin/ioreg -rd1 -c "IOPlatformExpertDevice"` and parses host id
// from the IOPlatformUUID line. If the command fails or the uuid cannot be // from the IOPlatformUUID line. If the command fails or the uuid cannot be
// parsed an error will be returned. // parsed an error will be returned.
func (r *hostIDReaderDarwin) read() (string, error) { func (r *hostIDReaderDarwin) read() (string, error) {
result, err := r.execCommand("ioreg", "-rd1", "-c", "IOPlatformExpertDevice") result, err := r.execCommand("/usr/sbin/ioreg", "-rd1", "-c", "IOPlatformExpertDevice")
if err != nil { if err != nil {
return "", err return "", err
} }
lines := strings.Split(result, "\n") for line := range strings.SplitSeq(result, "\n") {
for _, line := range lines {
if strings.Contains(line, "IOPlatformUUID") { if strings.Contains(line, "IOPlatformUUID") {
parts := strings.Split(line, " = ") parts := strings.Split(line, " = ")
if len(parts) == 2 { if len(parts) == 2 {
@ -96,7 +95,7 @@ func (r *hostIDReaderLinux) read() (string, error) {
type hostIDDetector struct{} type hostIDDetector struct{}
// Detect returns a *Resource containing the platform specific host id. // Detect returns a *Resource containing the platform specific host id.
func (hostIDDetector) Detect(ctx context.Context) (*Resource, error) { func (hostIDDetector) Detect(context.Context) (*Resource, error) {
hostID, err := hostID() hostID, err := hostID()
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
//go:build dragonfly || freebsd || netbsd || openbsd || solaris //go:build dragonfly || freebsd || netbsd || openbsd || solaris
// +build dragonfly freebsd netbsd openbsd solaris
package resource // import "go.opentelemetry.io/otel/sdk/resource" package resource // import "go.opentelemetry.io/otel/sdk/resource"

View File

@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
//go:build linux //go:build linux
// +build linux
package resource // import "go.opentelemetry.io/otel/sdk/resource" package resource // import "go.opentelemetry.io/otel/sdk/resource"

View File

@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows //go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows
// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows
package resource // import "go.opentelemetry.io/otel/sdk/resource" package resource // import "go.opentelemetry.io/otel/sdk/resource"

View File

@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
//go:build windows //go:build windows
// +build windows
package resource // import "go.opentelemetry.io/otel/sdk/resource" package resource // import "go.opentelemetry.io/otel/sdk/resource"

View File

@ -8,7 +8,7 @@ import (
"strings" "strings"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0" semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
) )
type osDescriptionProvider func() (string, error) type osDescriptionProvider func() (string, error)
@ -32,7 +32,7 @@ type (
// Detect returns a *Resource that describes the operating system type the // Detect returns a *Resource that describes the operating system type the
// service is running on. // service is running on.
func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { func (osTypeDetector) Detect(context.Context) (*Resource, error) {
osType := runtimeOS() osType := runtimeOS()
osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) osTypeAttribute := mapRuntimeOSToSemconvOSType(osType)
@ -45,7 +45,7 @@ func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) {
// Detect returns a *Resource that describes the operating system the // Detect returns a *Resource that describes the operating system the
// service is running on. // service is running on.
func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { func (osDescriptionDetector) Detect(context.Context) (*Resource, error) {
description, err := osDescription() description, err := osDescription()
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos //go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
// +build aix dragonfly freebsd linux netbsd openbsd solaris zos
package resource // import "go.opentelemetry.io/otel/sdk/resource" package resource // import "go.opentelemetry.io/otel/sdk/resource"
@ -63,12 +62,12 @@ func parseOSReleaseFile(file io.Reader) map[string]string {
return values return values
} }
// skip returns true if the line is blank or starts with a '#' character, and // skip reports whether the line is blank or starts with a '#' character, and
// therefore should be skipped from processing. // therefore should be skipped from processing.
func skip(line string) bool { func skip(line string) bool {
line = strings.TrimSpace(line) line = strings.TrimSpace(line)
return len(line) == 0 || strings.HasPrefix(line, "#") return line == "" || strings.HasPrefix(line, "#")
} }
// parse attempts to split the provided line on the first '=' character, and then // parse attempts to split the provided line on the first '=' character, and then
@ -76,7 +75,7 @@ func skip(line string) bool {
func parse(line string) (string, string, bool) { func parse(line string) (string, string, bool) {
k, v, found := strings.Cut(line, "=") k, v, found := strings.Cut(line, "=")
if !found || len(k) == 0 { if !found || k == "" {
return "", "", false return "", "", false
} }

View File

@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos //go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
package resource // import "go.opentelemetry.io/otel/sdk/resource" package resource // import "go.opentelemetry.io/otel/sdk/resource"

View File

@ -2,7 +2,6 @@
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos //go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !windows && !zos
// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!windows,!zos
package resource // import "go.opentelemetry.io/otel/sdk/resource" package resource // import "go.opentelemetry.io/otel/sdk/resource"

View File

@ -11,7 +11,7 @@ import (
"path/filepath" "path/filepath"
"runtime" "runtime"
semconv "go.opentelemetry.io/otel/semconv/v1.26.0" semconv "go.opentelemetry.io/otel/semconv/v1.39.0"
) )
type ( type (
@ -112,19 +112,19 @@ type (
// Detect returns a *Resource that describes the process identifier (PID) of the // Detect returns a *Resource that describes the process identifier (PID) of the
// executing process. // executing process.
func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { func (processPIDDetector) Detect(context.Context) (*Resource, error) {
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPID(pid())), nil
} }
// Detect returns a *Resource that describes the name of the process executable. // Detect returns a *Resource that describes the name of the process executable.
func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { func (processExecutableNameDetector) Detect(context.Context) (*Resource, error) {
executableName := filepath.Base(commandArgs()[0]) executableName := filepath.Base(commandArgs()[0])
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableName(executableName)), nil
} }
// Detect returns a *Resource that describes the full path of the process executable. // Detect returns a *Resource that describes the full path of the process executable.
func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { func (processExecutablePathDetector) Detect(context.Context) (*Resource, error) {
executablePath, err := executablePath() executablePath, err := executablePath()
if err != nil { if err != nil {
return nil, err return nil, err
@ -135,13 +135,13 @@ func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, err
// Detect returns a *Resource that describes all the command arguments as received // Detect returns a *Resource that describes all the command arguments as received
// by the process. // by the process.
func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { func (processCommandArgsDetector) Detect(context.Context) (*Resource, error) {
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgs(commandArgs()...)), nil
} }
// Detect returns a *Resource that describes the username of the user that owns the // Detect returns a *Resource that describes the username of the user that owns the
// process. // process.
func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { func (processOwnerDetector) Detect(context.Context) (*Resource, error) {
owner, err := owner() owner, err := owner()
if err != nil { if err != nil {
return nil, err return nil, err
@ -152,17 +152,17 @@ func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) {
// Detect returns a *Resource that describes the name of the compiler used to compile // Detect returns a *Resource that describes the name of the compiler used to compile
// this process image. // this process image.
func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { func (processRuntimeNameDetector) Detect(context.Context) (*Resource, error) {
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeName(runtimeName())), nil
} }
// Detect returns a *Resource that describes the version of the runtime of this process. // Detect returns a *Resource that describes the version of the runtime of this process.
func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { func (processRuntimeVersionDetector) Detect(context.Context) (*Resource, error) {
return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersion(runtimeVersion())), nil
} }
// Detect returns a *Resource that describes the runtime of this process. // Detect returns a *Resource that describes the runtime of this process.
func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { func (processRuntimeDescriptionDetector) Detect(context.Context) (*Resource, error) {
runtimeDescription := fmt.Sprintf( runtimeDescription := fmt.Sprintf(
"go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch())

View File

@ -112,7 +112,7 @@ func (r *Resource) String() string {
} }
// MarshalLog is the marshaling function used by the logging system to represent this Resource. // MarshalLog is the marshaling function used by the logging system to represent this Resource.
func (r *Resource) MarshalLog() interface{} { func (r *Resource) MarshalLog() any {
return struct { return struct {
Attributes attribute.Set Attributes attribute.Set
SchemaURL string SchemaURL string
@ -148,7 +148,7 @@ func (r *Resource) Iter() attribute.Iterator {
return r.attrs.Iter() return r.attrs.Iter()
} }
// Equal returns whether r and o represent the same resource. Two resources can // Equal reports whether r and o represent the same resource. Two resources can
// be equal even if they have different schema URLs. // be equal even if they have different schema URLs.
// //
// See the documentation on the [Resource] type for the pitfalls of using == // See the documentation on the [Resource] type for the pitfalls of using ==

View File

@ -5,20 +5,24 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
import ( import (
"context" "context"
"errors"
"sync" "sync"
"sync/atomic" "sync/atomic"
"time" "time"
"go.opentelemetry.io/otel" "go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/internal/global"
"go.opentelemetry.io/otel/sdk/internal/env" "go.opentelemetry.io/otel/sdk/trace/internal/env"
"go.opentelemetry.io/otel/sdk/trace/internal/observ"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
) )
// Defaults for BatchSpanProcessorOptions. // Defaults for BatchSpanProcessorOptions.
const ( const (
DefaultMaxQueueSize = 2048 DefaultMaxQueueSize = 2048
DefaultScheduleDelay = 5000 // DefaultScheduleDelay is the delay interval between two consecutive exports, in milliseconds.
DefaultScheduleDelay = 5000
// DefaultExportTimeout is the duration after which an export is cancelled, in milliseconds.
DefaultExportTimeout = 30000 DefaultExportTimeout = 30000
DefaultMaxExportBatchSize = 512 DefaultMaxExportBatchSize = 512
) )
@ -66,6 +70,8 @@ type batchSpanProcessor struct {
queue chan ReadOnlySpan queue chan ReadOnlySpan
dropped uint32 dropped uint32
inst *observ.BSP
batch []ReadOnlySpan batch []ReadOnlySpan
batchMutex sync.Mutex batchMutex sync.Mutex
timer *time.Timer timer *time.Timer
@ -86,11 +92,7 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize) maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
if maxExportBatchSize > maxQueueSize { if maxExportBatchSize > maxQueueSize {
if DefaultMaxExportBatchSize > maxQueueSize { maxExportBatchSize = min(DefaultMaxExportBatchSize, maxQueueSize)
maxExportBatchSize = maxQueueSize
} else {
maxExportBatchSize = DefaultMaxExportBatchSize
}
} }
o := BatchSpanProcessorOptions{ o := BatchSpanProcessorOptions{
@ -111,6 +113,16 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
stopCh: make(chan struct{}), stopCh: make(chan struct{}),
} }
var err error
bsp.inst, err = observ.NewBSP(
nextProcessorID(),
func() int64 { return int64(len(bsp.queue)) },
int64(bsp.o.MaxQueueSize),
)
if err != nil {
otel.Handle(err)
}
bsp.stopWait.Add(1) bsp.stopWait.Add(1)
go func() { go func() {
defer bsp.stopWait.Done() defer bsp.stopWait.Done()
@ -121,8 +133,16 @@ func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorO
return bsp return bsp
} }
var processorIDCounter atomic.Int64
// nextProcessorID returns an identifier for this batch span processor,
// starting with 0 and incrementing by 1 each time it is called.
func nextProcessorID() int64 {
return processorIDCounter.Add(1) - 1
}
// OnStart method does nothing. // OnStart method does nothing.
func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {} func (*batchSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
// OnEnd method enqueues a ReadOnlySpan for later processing. // OnEnd method enqueues a ReadOnlySpan for later processing.
func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) { func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) {
@ -161,6 +181,9 @@ func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
case <-ctx.Done(): case <-ctx.Done():
err = ctx.Err() err = ctx.Err()
} }
if bsp.inst != nil {
err = errors.Join(err, bsp.inst.Shutdown())
}
}) })
return err return err
} }
@ -170,7 +193,7 @@ type forceFlushSpan struct {
flushed chan struct{} flushed chan struct{}
} }
func (f forceFlushSpan) SpanContext() trace.SpanContext { func (forceFlushSpan) SpanContext() trace.SpanContext {
return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled}) return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled})
} }
@ -267,12 +290,15 @@ func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
if bsp.o.ExportTimeout > 0 { if bsp.o.ExportTimeout > 0 {
var cancel context.CancelFunc var cancel context.CancelFunc
ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout) ctx, cancel = context.WithTimeoutCause(ctx, bsp.o.ExportTimeout, errors.New("processor export timeout"))
defer cancel() defer cancel()
} }
if l := len(bsp.batch); l > 0 { if l := len(bsp.batch); l > 0 {
global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped)) global.Debug("exporting spans", "count", len(bsp.batch), "total_dropped", atomic.LoadUint32(&bsp.dropped))
if bsp.inst != nil {
bsp.inst.Processed(ctx, int64(l))
}
err := bsp.e.ExportSpans(ctx, bsp.batch) err := bsp.e.ExportSpans(ctx, bsp.batch)
// A new batch is always created after exporting, even if the batch failed to be exported. // A new batch is always created after exporting, even if the batch failed to be exported.
@ -381,11 +407,14 @@ func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd R
case bsp.queue <- sd: case bsp.queue <- sd:
return true return true
case <-ctx.Done(): case <-ctx.Done():
if bsp.inst != nil {
bsp.inst.ProcessedQueueFull(ctx, 1)
}
return false return false
} }
} }
func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) bool { func (bsp *batchSpanProcessor) enqueueDrop(ctx context.Context, sd ReadOnlySpan) bool {
if !sd.SpanContext().IsSampled() { if !sd.SpanContext().IsSampled() {
return false return false
} }
@ -395,12 +424,15 @@ func (bsp *batchSpanProcessor) enqueueDrop(_ context.Context, sd ReadOnlySpan) b
return true return true
default: default:
atomic.AddUint32(&bsp.dropped, 1) atomic.AddUint32(&bsp.dropped, 1)
if bsp.inst != nil {
bsp.inst.ProcessedQueueFull(ctx, 1)
}
} }
return false return false
} }
// MarshalLog is the marshaling function used by the logging system to represent this Span Processor. // MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
func (bsp *batchSpanProcessor) MarshalLog() interface{} { func (bsp *batchSpanProcessor) MarshalLog() any {
return struct { return struct {
Type string Type string
SpanExporter SpanExporter SpanExporter SpanExporter

View File

@ -6,5 +6,8 @@ Package trace contains support for OpenTelemetry distributed tracing.
The following assumes a basic familiarity with OpenTelemetry concepts. The following assumes a basic familiarity with OpenTelemetry concepts.
See https://opentelemetry.io. See https://opentelemetry.io.
See [go.opentelemetry.io/otel/sdk/internal/x] for information about
the experimental features.
*/ */
package trace // import "go.opentelemetry.io/otel/sdk/trace" package trace // import "go.opentelemetry.io/otel/sdk/trace"

View File

@ -5,10 +5,8 @@ package trace // import "go.opentelemetry.io/otel/sdk/trace"
import ( import (
"context" "context"
crand "crypto/rand"
"encoding/binary" "encoding/binary"
"math/rand" "math/rand/v2"
"sync"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
) )
@ -29,20 +27,15 @@ type IDGenerator interface {
// must never be done outside of a new major release. // must never be done outside of a new major release.
} }
type randomIDGenerator struct { type randomIDGenerator struct{}
sync.Mutex
randSource *rand.Rand
}
var _ IDGenerator = &randomIDGenerator{} var _ IDGenerator = &randomIDGenerator{}
// NewSpanID returns a non-zero span ID from a randomly-chosen sequence. // NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID { func (*randomIDGenerator) NewSpanID(context.Context, trace.TraceID) trace.SpanID {
gen.Lock()
defer gen.Unlock()
sid := trace.SpanID{} sid := trace.SpanID{}
for { for {
_, _ = gen.randSource.Read(sid[:]) binary.NativeEndian.PutUint64(sid[:], rand.Uint64())
if sid.IsValid() { if sid.IsValid() {
break break
} }
@ -52,19 +45,18 @@ func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.Trace
// NewIDs returns a non-zero trace ID and a non-zero span ID from a // NewIDs returns a non-zero trace ID and a non-zero span ID from a
// randomly-chosen sequence. // randomly-chosen sequence.
func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) { func (*randomIDGenerator) NewIDs(context.Context) (trace.TraceID, trace.SpanID) {
gen.Lock()
defer gen.Unlock()
tid := trace.TraceID{} tid := trace.TraceID{}
sid := trace.SpanID{} sid := trace.SpanID{}
for { for {
_, _ = gen.randSource.Read(tid[:]) binary.NativeEndian.PutUint64(tid[:8], rand.Uint64())
binary.NativeEndian.PutUint64(tid[8:], rand.Uint64())
if tid.IsValid() { if tid.IsValid() {
break break
} }
} }
for { for {
_, _ = gen.randSource.Read(sid[:]) binary.NativeEndian.PutUint64(sid[:], rand.Uint64())
if sid.IsValid() { if sid.IsValid() {
break break
} }
@ -73,9 +65,5 @@ func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.
} }
func defaultIDGenerator() IDGenerator { func defaultIDGenerator() IDGenerator {
gen := &randomIDGenerator{} return &randomIDGenerator{}
var rngSeed int64
_ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed)
gen.randSource = rand.New(rand.NewSource(rngSeed))
return gen
} }

View File

@ -1,7 +1,9 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// SPDX-License-Identifier: Apache-2.0 // SPDX-License-Identifier: Apache-2.0
package env // import "go.opentelemetry.io/otel/sdk/internal/env" // Package env provides types and functionality for environment variable support
// in the OpenTelemetry SDK.
package env // import "go.opentelemetry.io/otel/sdk/trace/internal/env"
import ( import (
"os" "os"

Some files were not shown because too many files have changed in this diff Show More