TUN-8407: Upgrade go to version 1.22.2

João "Pisco" Fernandes 2024-05-07 11:19:58 +01:00
parent f27418044b
commit 66efd3f2bb
412 changed files with 28140 additions and 17395 deletions


@ -4,7 +4,7 @@ jobs:
check: check:
strategy: strategy:
matrix: matrix:
go-version: [1.21.x] go-version: [1.22.x]
os: [ubuntu-latest, macos-latest, windows-latest] os: [ubuntu-latest, macos-latest, windows-latest]
runs-on: ${{ matrix.os }} runs-on: ${{ matrix.os }}
steps: steps:


@ -3,6 +3,6 @@
cd /tmp cd /tmp
git clone -q https://github.com/cloudflare/go git clone -q https://github.com/cloudflare/go
cd go/src cd go/src
# https://github.com/cloudflare/go/tree/34129e47042e214121b6bbff0ded4712debed18e is version go1.21.5-devel-cf # https://github.com/cloudflare/go/tree/ec0a014545f180b0c74dfd687698657a9e86e310 is version go1.22.2-devel-cf
git checkout -q 34129e47042e214121b6bbff0ded4712debed18e git checkout -q ec0a014545f180b0c74dfd687698657a9e86e310
./make.bash ./make.bash


@ -9,8 +9,8 @@ Set-Location "$Env:Temp"
git clone -q https://github.com/cloudflare/go git clone -q https://github.com/cloudflare/go
Write-Output "Building go..." Write-Output "Building go..."
cd go/src cd go/src
# https://github.com/cloudflare/go/tree/34129e47042e214121b6bbff0ded4712debed18e is version go1.21.5-devel-cf # https://github.com/cloudflare/go/tree/ec0a014545f180b0c74dfd687698657a9e86e310 is version go1.22.2-devel-cf
git checkout -q 34129e47042e214121b6bbff0ded4712debed18e git checkout -q ec0a014545f180b0c74dfd687698657a9e86e310
& ./make.bat & ./make.bat
Write-Output "Installed" Write-Output "Installed"


@ -1,20 +1,20 @@
$ErrorActionPreference = "Stop" $ErrorActionPreference = "Stop"
$ProgressPreference = "SilentlyContinue" $ProgressPreference = "SilentlyContinue"
$GoMsiVersion = "go1.21.5.windows-amd64.msi" $GoMsiVersion = "go1.22.2.windows-amd64.msi"
Write-Output "Downloading go installer..." Write-Output "Downloading go installer..."
Set-Location "$Env:Temp" Set-Location "$Env:Temp"
(New-Object System.Net.WebClient).DownloadFile( (New-Object System.Net.WebClient).DownloadFile(
"https://go.dev/dl/$GoMsiVersion", "https://go.dev/dl/$GoMsiVersion",
"$Env:Temp\$GoMsiVersion" "$Env:Temp\$GoMsiVersion"
) )
Write-Output "Installing go..." Write-Output "Installing go..."
Install-Package "$Env:Temp\$GoMsiVersion" -Force Install-Package "$Env:Temp\$GoMsiVersion" -Force
# Go installer updates global $PATH # Go installer updates global $PATH
go env go env
Write-Output "Installed" Write-Output "Installed"


@ -1,7 +1,7 @@
# use a builder image for building cloudflare # use a builder image for building cloudflare
ARG TARGET_GOOS ARG TARGET_GOOS
ARG TARGET_GOARCH ARG TARGET_GOARCH
FROM golang:1.21.5 as builder FROM golang:1.22.2 as builder
ENV GO111MODULE=on \ ENV GO111MODULE=on \
CGO_ENABLED=0 \ CGO_ENABLED=0 \
TARGET_GOOS=${TARGET_GOOS} \ TARGET_GOOS=${TARGET_GOOS} \


@ -1,8 +1,8 @@
# use a builder image for building cloudflare # use a builder image for building cloudflare
FROM golang:1.21.5 as builder FROM golang:1.22.2 as builder
ENV GO111MODULE=on \ ENV GO111MODULE=on \
CGO_ENABLED=0 CGO_ENABLED=0
WORKDIR /go/src/github.com/cloudflare/cloudflared/ WORKDIR /go/src/github.com/cloudflare/cloudflared/
# copy our sources into the builder image # copy our sources into the builder image


@ -1,8 +1,8 @@
# use a builder image for building cloudflare # use a builder image for building cloudflare
FROM golang:1.21.5 as builder FROM golang:1.22.2 as builder
ENV GO111MODULE=on \ ENV GO111MODULE=on \
CGO_ENABLED=0 CGO_ENABLED=0
WORKDIR /go/src/github.com/cloudflare/cloudflared/ WORKDIR /go/src/github.com/cloudflare/cloudflared/
# copy our sources into the builder image # copy our sources into the builder image


@ -1,4 +1,4 @@
pinned_go: &pinned_go go-boring=1.21.5-1 pinned_go: &pinned_go go-boring=1.22.2-1
build_dir: &build_dir /cfsetup_build build_dir: &build_dir /cfsetup_build
default-flavor: bullseye default-flavor: bullseye
@ -33,11 +33,11 @@ buster: &buster
- ./build-packages-fips.sh - ./build-packages-fips.sh
# Build binary for component test # Build binary for component test
- GOOS=linux GOARCH=amd64 make cloudflared - GOOS=linux GOARCH=amd64 make cloudflared
cover: cover:
build_dir: *build_dir build_dir: *build_dir
builddeps: *build_deps builddeps: *build_deps
pre-cache: *build_pre_cache pre-cache: *build_pre_cache
post-cache: post-cache:
- make cover - make cover
# except FIPS (handled in github-fips-release-pkgs) and macos (handled in github-release-macos-amd64) # except FIPS (handled in github-fips-release-pkgs) and macos (handled in github-release-macos-amd64)
github-release-pkgs: github-release-pkgs:
@ -96,7 +96,7 @@ buster: &buster
- make github-release-built-pkgs - make github-release-built-pkgs
generate-versions-file: generate-versions-file:
build_dir: *build_dir build_dir: *build_dir
builddeps: builddeps:
- *pinned_go - *pinned_go
- build-essential - build-essential
post-cache: post-cache:


@ -1,4 +1,4 @@
FROM golang:1.21.5 as builder FROM golang:1.22.2 as builder
ENV GO111MODULE=on \ ENV GO111MODULE=on \
CGO_ENABLED=0 CGO_ENABLED=0
WORKDIR /go/src/github.com/cloudflare/cloudflared/ WORKDIR /go/src/github.com/cloudflare/cloudflared/

go.mod

@ -1,6 +1,6 @@
module github.com/cloudflare/cloudflared module github.com/cloudflare/cloudflared
go 1.21 go 1.22
require ( require (
github.com/coredns/coredns v1.10.0 github.com/coredns/coredns v1.10.0
@ -16,7 +16,7 @@ require (
github.com/gobwas/ws v1.0.4 github.com/gobwas/ws v1.0.4
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3
github.com/google/gopacket v1.1.19 github.com/google/gopacket v1.1.19
github.com/google/uuid v1.3.1 github.com/google/uuid v1.6.0
github.com/gorilla/websocket v1.4.2 github.com/gorilla/websocket v1.4.2
github.com/json-iterator/go v1.1.12 github.com/json-iterator/go v1.1.12
github.com/mattn/go-colorable v0.1.13 github.com/mattn/go-colorable v0.1.13
@ -27,21 +27,21 @@ require (
github.com/prometheus/client_model v0.2.0 github.com/prometheus/client_model v0.2.0
github.com/quic-go/quic-go v0.42.0 github.com/quic-go/quic-go v0.42.0
github.com/rs/zerolog v1.20.0 github.com/rs/zerolog v1.20.0
github.com/stretchr/testify v1.8.4 github.com/stretchr/testify v1.9.0
github.com/urfave/cli/v2 v2.3.0 github.com/urfave/cli/v2 v2.3.0
go.opentelemetry.io/contrib/propagators v0.22.0 go.opentelemetry.io/contrib/propagators v0.22.0
go.opentelemetry.io/otel v1.21.0 go.opentelemetry.io/otel v1.26.0
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0
go.opentelemetry.io/otel/sdk v1.21.0 go.opentelemetry.io/otel/sdk v1.26.0
go.opentelemetry.io/otel/trace v1.21.0 go.opentelemetry.io/otel/trace v1.26.0
go.opentelemetry.io/proto/otlp v1.0.0 go.opentelemetry.io/proto/otlp v1.2.0
go.uber.org/automaxprocs v1.4.0 go.uber.org/automaxprocs v1.4.0
golang.org/x/crypto v0.21.0 golang.org/x/crypto v0.23.0
golang.org/x/net v0.21.0 golang.org/x/net v0.25.0
golang.org/x/sync v0.4.0 golang.org/x/sync v0.6.0
golang.org/x/sys v0.18.0 golang.org/x/sys v0.20.0
golang.org/x/term v0.18.0 golang.org/x/term v0.20.0
google.golang.org/protobuf v1.31.0 google.golang.org/protobuf v1.34.1
gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0
gopkg.in/yaml.v3 v3.0.1 gopkg.in/yaml.v3 v3.0.1
nhooyr.io/websocket v1.8.7 nhooyr.io/websocket v1.8.7
@ -61,14 +61,14 @@ require (
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 // indirect
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 // indirect
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 // indirect
github.com/go-logr/logr v1.3.0 // indirect github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/stdr v1.2.2 // indirect github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect
github.com/gobwas/httphead v0.0.0-20200921212729-da3d93bc3c58 // indirect github.com/gobwas/httphead v0.0.0-20200921212729-da3d93bc3c58 // indirect
github.com/gobwas/pool v0.2.1 // indirect github.com/gobwas/pool v0.2.1 // indirect
github.com/golang/protobuf v1.5.3 // indirect github.com/golang/protobuf v1.5.4 // indirect
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 // indirect
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
github.com/klauspost/compress v1.15.11 // indirect github.com/klauspost/compress v1.15.11 // indirect
github.com/kr/text v0.2.0 // indirect github.com/kr/text v0.2.0 // indirect
@ -83,17 +83,17 @@ require (
github.com/prometheus/common v0.37.0 // indirect github.com/prometheus/common v0.37.0 // indirect
github.com/prometheus/procfs v0.8.0 // indirect github.com/prometheus/procfs v0.8.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect
go.opentelemetry.io/otel/metric v1.21.0 // indirect go.opentelemetry.io/otel/metric v1.26.0 // indirect
go.uber.org/mock v0.4.0 // indirect go.uber.org/mock v0.4.0 // indirect
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db // indirect golang.org/x/exp v0.0.0-20221205204356-47842c84f3db // indirect
golang.org/x/mod v0.11.0 // indirect golang.org/x/mod v0.11.0 // indirect
golang.org/x/oauth2 v0.13.0 // indirect golang.org/x/oauth2 v0.17.0 // indirect
golang.org/x/text v0.14.0 // indirect golang.org/x/text v0.15.0 // indirect
golang.org/x/tools v0.9.1 // indirect golang.org/x/tools v0.9.1 // indirect
google.golang.org/appengine v1.6.8 // indirect google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de // indirect
google.golang.org/grpc v1.60.0 // indirect google.golang.org/grpc v1.63.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect
) )

go.sum

@ -119,8 +119,8 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
@ -152,8 +152,6 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 h1:zN2lZNZRflqFyxVaTIU61KNKQ9C0055u9CAfpmqUvo4= github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3 h1:zN2lZNZRflqFyxVaTIU61KNKQ9C0055u9CAfpmqUvo4=
github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3/go.mod h1:nPpo7qLxd6XL3hWJG/O60sR8ZKfMCiIoNap5GvD12KU= github.com/golang-collections/collections v0.0.0-20130729185459-604e922904d3/go.mod h1:nPpo7qLxd6XL3hWJG/O60sR8ZKfMCiIoNap5GvD12KU=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -180,8 +178,8 @@ github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
@ -212,15 +210,15 @@ github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJY
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.1 h1:KjJaJ9iWZ3jOFZIf1Lqf4laDRCasjl0BCmnEGxkdLb4= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.3.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1 h1:/c3QmbOGMGTOumP2iT/rCwB7b0QDGLKzqOmktBjT+Is=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.1/go.mod h1:5SN9VR2LTsRFsrEC6FHgRbTWrTHu6tqPeKxEQv15giM=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1:MJG/KsmcqMwFAkh8mTnAwhyKoB+sTAnY4CACC110tbU=
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
@ -344,8 +342,8 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ= github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ=
github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE= github.com/tinylib/msgp v1.1.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
@ -366,19 +364,19 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opentelemetry.io/contrib/propagators v0.22.0 h1:KGdv58M2//veiYLIhb31mofaI2LgkIPXXAZVeYVyfd8= go.opentelemetry.io/contrib/propagators v0.22.0 h1:KGdv58M2//veiYLIhb31mofaI2LgkIPXXAZVeYVyfd8=
go.opentelemetry.io/contrib/propagators v0.22.0/go.mod h1:xGOuXr6lLIF9BXipA4pm6UuOSI0M98U6tsI3khbOiwU= go.opentelemetry.io/contrib/propagators v0.22.0/go.mod h1:xGOuXr6lLIF9BXipA4pm6UuOSI0M98U6tsI3khbOiwU=
go.opentelemetry.io/otel v1.0.0-RC2/go.mod h1:w1thVQ7qbAy8MHb0IFj8a5Q2QU0l2ksf8u/CN8m3NOM= go.opentelemetry.io/otel v1.0.0-RC2/go.mod h1:w1thVQ7qbAy8MHb0IFj8a5Q2QU0l2ksf8u/CN8m3NOM=
go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= go.opentelemetry.io/otel v1.26.0 h1:LQwgL5s/1W7YiiRwxf03QGnWLb2HW4pLiAhaA5cZXBs=
go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= go.opentelemetry.io/otel v1.26.0/go.mod h1:UmLkJHUAidDval2EICqBMbnAd0/m2vmpf/dAM+fvFs4=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0 h1:1u/AyyOqAWzy+SkPxDpahCNZParHV8Vid1RnI2clyDE=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.26.0/go.mod h1:z46paqbJ9l7c9fIPCXTqTGwhQZ5XoTIsfeFYWboizjs=
go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= go.opentelemetry.io/otel/metric v1.26.0 h1:7S39CLuY5Jgg9CrnA9HHiEjGMF/X2VHvoXGgSllRz30=
go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= go.opentelemetry.io/otel/metric v1.26.0/go.mod h1:SY+rHOI4cEawI9a7N1A4nIg/nTQXe1ccCNWYOJUrpX4=
go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= go.opentelemetry.io/otel/sdk v1.26.0 h1:Y7bumHf5tAiDlRYFmGqetNcLaVUZmh4iYfmGxtmz7F8=
go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= go.opentelemetry.io/otel/sdk v1.26.0/go.mod h1:0p8MXpqLeJ0pzcszQQN4F0S5FVjBLgypeGSngLsmirs=
go.opentelemetry.io/otel/trace v1.0.0-RC2/go.mod h1:JPQ+z6nNw9mqEGT8o3eoPTdnNI+Aj5JcxEsVGREIAy4= go.opentelemetry.io/otel/trace v1.0.0-RC2/go.mod h1:JPQ+z6nNw9mqEGT8o3eoPTdnNI+Aj5JcxEsVGREIAy4=
go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= go.opentelemetry.io/otel/trace v1.26.0 h1:1ieeAUb4y0TE26jUFrCIXKpTuVK7uJGN9/Z/2LP5sQA=
go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= go.opentelemetry.io/otel/trace v1.26.0/go.mod h1:4iDxvGDQuUkHve82hJJ8UqrwswHYsZuWCBllGV2U2y0=
go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= go.opentelemetry.io/proto/otlp v1.2.0 h1:pVeZGk7nXDC9O2hncA6nHldxEjm6LByfA2aN8IOkz94=
go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.opentelemetry.io/proto/otlp v1.2.0/go.mod h1:gGpR8txAl5M03pDhMC79G6SdqNV26naRm/KDsgaHD8A=
go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0= go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0=
go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU=
@ -390,8 +388,8 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA= golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs= golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@ -463,8 +461,8 @@ golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.21.0 h1:AQyQV4dYCvJ7vGmJyKki9+PBdyvhkSd8EIx/qb0AYv4= golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -472,8 +470,8 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.13.0 h1:jDDenyj+WgFtmV3zYVoi8aE2BwtXFLWOA67ZfNWftiY= golang.org/x/oauth2 v0.17.0 h1:6m3ZPmLEFdVxKKWnKq4VqZ60gutO35zm+zrAHVmHyDQ=
golang.org/x/oauth2 v0.13.0/go.mod h1:/JMhi4ZRXAf4HG9LiNmxvk+45+96RUlVThiH8FzNBn0= golang.org/x/oauth2 v0.17.0/go.mod h1:OzPDGQiuQMguemayvdylqddI7qcD9lnSDb+1FiwQ5HA=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -485,8 +483,8 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.4.0 h1:zxkM55ReGkDlKSM+Fu41A+zmbZuaPVbGMzvvdUPznYQ= golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.4.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -533,12 +531,12 @@ golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4= golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8= golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58= golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -547,8 +545,8 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -656,12 +654,12 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97 h1:SeZZZx0cP0fqUyA+oRzP9k7cSwJlvDFiROO72uwD6i0= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de h1:F6qOa9AZTYJXOUEr4jDysRDLrm4PHePlge4v4TGAlxY=
google.golang.org/genproto v0.0.0-20231002182017-d307bd883b97/go.mod h1:t1VqOqqvce95G3hIDCT5FeO3YUc6Q4Oe24L/+rNMxRk= google.golang.org/genproto v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:VUhTRKeHn9wwcdrk73nvdC9gF178Tzhmt/qyaFcPLSo=
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97 h1:W18sezcAYs+3tDZX4F80yctqa12jcP1PUS2gQu1zTPU= google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de h1:jFNzHPIeuzhdRwVhbZdiym9q0ory/xY3sA+v2wPg8I0=
google.golang.org/genproto/googleapis/api v0.0.0-20231002182017-d307bd883b97/go.mod h1:iargEX0SFPm3xcfMI0d1domjg0ZF4Aa0p2awqyxhvF0= google.golang.org/genproto/googleapis/api v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:5iCWqnniDlqZHrd3neWVTOwvh/v6s3232omMecelax8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0= google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de h1:cZGRis4/ot9uVm639a+rHCUaG0JJHEsdyzSQTMX+suY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY= google.golang.org/genproto/googleapis/rpc v0.0.0-20240227224415-6ceb2ff114de/go.mod h1:H4O17MA/PE9BsGx3w+a+W2VOLLD1Qf7oJneAoU6WktY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@ -674,8 +672,8 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.60.0 h1:6FQAR0kM31P6MRdeluor2w2gPaS4SVNrD/DNTxrQ15k= google.golang.org/grpc v1.63.0 h1:WjKe+dnvABXyPJMD7KDNLxtoGk5tgk+YFWN6cBWjZE8=
google.golang.org/grpc v1.60.0/go.mod h1:OlCHIeLYqSSsLi6i49B5QGdzaMZK9+M7LXN2FKz4eGM= google.golang.org/grpc v1.63.0/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@ -688,8 +686,8 @@ google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGj
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=


@ -91,11 +91,12 @@ logr design but also left out some parts and changed others:
 | Adding a name to a logger | `WithName` | no API |
 | Modify verbosity of log entries in a call chain | `V` | no API |
 | Grouping of key/value pairs | not supported | `WithGroup`, `GroupValue` |
+| Pass context for extracting additional values | no API | API variants like `InfoCtx` |
 The high-level slog API is explicitly meant to be one of many different APIs
 that can be layered on top of a shared `slog.Handler`. logr is one such
-alternative API, with [interoperability](#slog-interoperability) provided by the [`slogr`](slogr)
-package.
+alternative API, with [interoperability](#slog-interoperability) provided by
+some conversion functions.
 ### Inspiration
@ -145,24 +146,24 @@ There are implementations for the following logging libraries:
 ## slog interoperability
 Interoperability goes both ways, using the `logr.Logger` API with a `slog.Handler`
-and using the `slog.Logger` API with a `logr.LogSink`. [slogr](./slogr) provides `NewLogr` and
-`NewSlogHandler` API calls to convert between a `logr.Logger` and a `slog.Handler`.
+and using the `slog.Logger` API with a `logr.LogSink`. `FromSlogHandler` and
+`ToSlogHandler` convert between a `logr.Logger` and a `slog.Handler`.
 As usual, `slog.New` can be used to wrap such a `slog.Handler` in the high-level
-slog API. `slogr` itself leaves that to the caller.
+slog API.
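As a quick illustration of the conversion functions named above, here is a minimal sketch (editor's example, not part of the upstream change) using the `FromSlogHandler` and `ToSlogHandler` helpers from the vendored logr v1.4.1; the handler choice and messages are arbitrary:

```go
package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	// Wrap an existing slog.Handler in the logr API.
	log := logr.FromSlogHandler(slog.NewTextHandler(os.Stderr, nil))
	log.Info("hello from logr", "go", "1.22.2")

	// And the other way around: expose a logr.Logger as a slog.Handler.
	slogger := slog.New(logr.ToSlogHandler(log))
	slogger.Info("hello from slog", "go", "1.22.2")
}
```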
-## Using a `logr.Sink` as backend for slog
+### Using a `logr.LogSink` as backend for slog
 Ideally, a logr sink implementation should support both logr and slog by
-implementing both the normal logr interface(s) and `slogr.SlogSink`. Because
+implementing both the normal logr interface(s) and `SlogSink`. Because
 of a conflict in the parameters of the common `Enabled` method, it is [not
 possible to implement both slog.Handler and logr.Sink in the same
 type](https://github.com/golang/go/issues/59110).
 If both are supported, log calls can go from the high-level APIs to the backend
-without the need to convert parameters. `NewLogr` and `NewSlogHandler` can
+without the need to convert parameters. `FromSlogHandler` and `ToSlogHandler` can
 convert back and forth without adding additional wrappers, with one exception:
 when `Logger.V` was used to adjust the verbosity for a `slog.Handler`, then
-`NewSlogHandler` has to use a wrapper which adjusts the verbosity for future
+`ToSlogHandler` has to use a wrapper which adjusts the verbosity for future
 log calls.
 Such an implementation should also support values that implement specific
@ -187,13 +188,13 @@ Not supporting slog has several drawbacks:
 These drawbacks are severe enough that applications using a mixture of slog and
 logr should switch to a different backend.
-## Using a `slog.Handler` as backend for logr
+### Using a `slog.Handler` as backend for logr
 Using a plain `slog.Handler` without support for logr works better than the
 other direction:
 - All logr verbosity levels can be mapped 1:1 to their corresponding slog level
 by negating them.
-- Stack unwinding is done by the `slogr.SlogSink` and the resulting program
+- Stack unwinding is done by the `SlogSink` and the resulting program
 counter is passed to the `slog.Handler`.
 - Names added via `Logger.WithName` are gathered and recorded in an additional
 attribute with `logger` as key and the names separated by slash as value.
@ -205,27 +206,39 @@ ideally support both `logr.Marshaler` and `slog.Valuer`. If compatibility
 with logr implementations without slog support is not important, then
 `slog.Valuer` is sufficient.
-## Context support for slog
+### Context support for slog
 Storing a logger in a `context.Context` is not supported by
-slog. `logr.NewContext` and `logr.FromContext` can be used with slog like this
-to fill this gap:
-func HandlerFromContext(ctx context.Context) slog.Handler {
-logger, err := logr.FromContext(ctx)
-if err == nil {
-return slogr.NewSlogHandler(logger)
-}
-return slog.Default().Handler()
-}
-func ContextWithHandler(ctx context.Context, handler slog.Handler) context.Context {
-return logr.NewContext(ctx, slogr.NewLogr(handler))
-}
-The downside is that storing and retrieving a `slog.Handler` needs more
-allocations compared to using a `logr.Logger`. Therefore the recommendation is
-to use the `logr.Logger` API in code which uses contextual logging.
+slog. `NewContextWithSlogLogger` and `FromContextAsSlogLogger` can be
+used to fill this gap. They store and retrieve a `slog.Logger` pointer
+under the same context key that is also used by `NewContext` and
+`FromContext` for `logr.Logger` value.
+When `NewContextWithSlogLogger` is followed by `FromContext`, the latter will
+automatically convert the `slog.Logger` to a
+`logr.Logger`. `FromContextAsSlogLogger` does the same for the other direction.
+With this approach, binaries which use either slog or logr are as efficient as
+possible with no unnecessary allocations. This is also why the API stores a
+`slog.Logger` pointer: when storing a `slog.Handler`, creating a `slog.Logger`
+on retrieval would need to allocate one.
+The downside is that switching back and forth needs more allocations. Because
+logr is the API that is already in use by different packages, in particular
+Kubernetes, the recommendation is to use the `logr.Logger` API in code which
+uses contextual logging.
+An alternative to adding values to a logger and storing that logger in the
+context is to store the values in the context and to configure a logging
+backend to extract those values when emitting log entries. This only works when
+log calls are passed the context, which is not supported by the logr API.
+With the slog API, it is possible, but not
+required. https://github.com/veqryn/slog-context is a package for slog which
+provides additional support code for this approach. It also contains wrappers
+for the context functions in logr, so developers who prefer to not use the logr
+APIs directly can use those instead and the resulting code will still be
+interoperable with logr.
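A minimal sketch of that context round trip (editor's example; it uses the functions added in the vendored files below, with placeholder handlers and messages):

```go
package main

import (
	"context"
	"fmt"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	ctx := context.Background()

	// Store a *slog.Logger; FromContext converts it to a logr.Logger on retrieval.
	ctx = logr.NewContextWithSlogLogger(ctx, slog.New(slog.NewJSONHandler(os.Stderr, nil)))
	if lg, err := logr.FromContext(ctx); err == nil {
		lg.Info("retrieved as logr.Logger")
	}

	// Store a logr.Logger; FromContextAsSlogLogger hands it back wrapped for slog.
	ctx = logr.NewContext(ctx, funcr.New(func(prefix, args string) { fmt.Println(prefix, args) }, funcr.Options{}))
	if sl := logr.FromContextAsSlogLogger(ctx); sl != nil {
		sl.Info("retrieved as *slog.Logger")
	}
}
```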
 ## FAQ

vendor/github.com/go-logr/logr/context.go (new file, generated, vendored)

@ -0,0 +1,33 @@
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
// contextKey is how we find Loggers in a context.Context. With Go < 1.21,
// the value is always a Logger value. With Go >= 1.21, the value can be a
// Logger value or a slog.Logger pointer.
type contextKey struct{}
// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}
func (notFoundError) Error() string {
return "no logr.Logger was present"
}
func (notFoundError) IsNotFound() bool {
return true
}

vendor/github.com/go-logr/logr/context_noslog.go (new file, generated, vendored)

@ -0,0 +1,49 @@
//go:build !go1.21
// +build !go1.21
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
)
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v, nil
}
return Logger{}, notFoundError{}
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}

vendor/github.com/go-logr/logr/context_slog.go (new file, generated, vendored)

@ -0,0 +1,83 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2019 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"fmt"
"log/slog"
)
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
v := ctx.Value(contextKey{})
if v == nil {
return Logger{}, notFoundError{}
}
switch v := v.(type) {
case Logger:
return v, nil
case *slog.Logger:
return FromSlogHandler(v.Handler()), nil
default:
// Not reached.
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
}
}
// FromContextAsSlogLogger returns a slog.Logger from ctx or nil if no such Logger is found.
func FromContextAsSlogLogger(ctx context.Context) *slog.Logger {
v := ctx.Value(contextKey{})
if v == nil {
return nil
}
switch v := v.(type) {
case Logger:
return slog.New(ToSlogHandler(v))
case *slog.Logger:
return v
default:
// Not reached.
panic(fmt.Sprintf("unexpected value type for logr context key: %T", v))
}
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if logger, err := FromContext(ctx); err == nil {
return logger
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}
// NewContextWithSlogLogger returns a new Context, derived from ctx, which carries the
// provided slog.Logger.
func NewContextWithSlogLogger(ctx context.Context, logger *slog.Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}


@ -100,6 +100,11 @@ type Options struct {
// details, see docs for Go's time.Layout. // details, see docs for Go's time.Layout.
TimestampFormat string TimestampFormat string
// LogInfoLevel tells funcr what key to use to log the info level.
// If not specified, the info level will be logged as "level".
// If this is set to "", the info level will not be logged at all.
LogInfoLevel *string
// Verbosity tells funcr which V logs to produce. Higher values enable // Verbosity tells funcr which V logs to produce. Higher values enable
// more logs. Info logs at or below this level will be written, while logs // more logs. Info logs at or below this level will be written, while logs
// above this level will be discarded. // above this level will be discarded.
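A small usage sketch of the new `LogInfoLevel` option shown above (editor's example; key name and message are arbitrary): it changes the key under which the numeric info level is emitted, and pointing it at an empty string drops the key entirely.

```go
package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// Emit the info level under "severity" instead of the default "level" key.
	levelKey := "severity"
	log := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{
		Verbosity:    1,
		LogInfoLevel: &levelKey,
	})
	log.V(1).Info("building with go 1.22.2")
	// prints something like: {"severity":1,"msg":"building with go 1.22.2"}
}
```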
@ -213,6 +218,10 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
if opts.MaxLogDepth == 0 { if opts.MaxLogDepth == 0 {
opts.MaxLogDepth = defaultMaxLogDepth opts.MaxLogDepth = defaultMaxLogDepth
} }
if opts.LogInfoLevel == nil {
opts.LogInfoLevel = new(string)
*opts.LogInfoLevel = "level"
}
f := Formatter{ f := Formatter{
outputFormat: outfmt, outputFormat: outfmt,
prefix: "", prefix: "",
@ -227,12 +236,15 @@ func newFormatter(opts Options, outfmt outputFormat) Formatter {
// implementation. It should be constructed with NewFormatter. Some of // implementation. It should be constructed with NewFormatter. Some of
// its methods directly implement logr.LogSink. // its methods directly implement logr.LogSink.
type Formatter struct { type Formatter struct {
outputFormat outputFormat outputFormat outputFormat
prefix string prefix string
values []any values []any
valuesStr string valuesStr string
depth int parentValuesStr string
opts *Options depth int
opts *Options
group string // for slog groups
groupDepth int
} }
// outputFormat indicates which outputFormat to use. // outputFormat indicates which outputFormat to use.
@ -253,33 +265,62 @@ func (f Formatter) render(builtins, args []any) string {
// Empirically bytes.Buffer is faster than strings.Builder for this. // Empirically bytes.Buffer is faster than strings.Builder for this.
buf := bytes.NewBuffer(make([]byte, 0, 1024)) buf := bytes.NewBuffer(make([]byte, 0, 1024))
if f.outputFormat == outputJSON { if f.outputFormat == outputJSON {
buf.WriteByte('{') buf.WriteByte('{') // for the whole line
} }
vals := builtins vals := builtins
if hook := f.opts.RenderBuiltinsHook; hook != nil { if hook := f.opts.RenderBuiltinsHook; hook != nil {
vals = hook(f.sanitize(vals)) vals = hook(f.sanitize(vals))
} }
f.flatten(buf, vals, false, false) // keys are ours, no need to escape f.flatten(buf, vals, false, false) // keys are ours, no need to escape
continuing := len(builtins) > 0 continuing := len(builtins) > 0
if len(f.valuesStr) > 0 {
if f.parentValuesStr != "" {
if continuing { if continuing {
if f.outputFormat == outputJSON { buf.WriteByte(f.comma())
buf.WriteByte(',')
} else {
buf.WriteByte(' ')
}
} }
buf.WriteString(f.parentValuesStr)
continuing = true continuing = true
buf.WriteString(f.valuesStr)
} }
groupDepth := f.groupDepth
if f.group != "" {
if f.valuesStr != "" || len(args) != 0 {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
buf.WriteByte(f.colon())
buf.WriteByte('{') // for the group
continuing = false
} else {
// The group was empty
groupDepth--
}
}
if f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.valuesStr)
continuing = true
}
vals = args vals = args
if hook := f.opts.RenderArgsHook; hook != nil { if hook := f.opts.RenderArgsHook; hook != nil {
vals = hook(f.sanitize(vals)) vals = hook(f.sanitize(vals))
} }
f.flatten(buf, vals, continuing, true) // escape user-provided keys f.flatten(buf, vals, continuing, true) // escape user-provided keys
if f.outputFormat == outputJSON {
buf.WriteByte('}') for i := 0; i < groupDepth; i++ {
buf.WriteByte('}') // for the groups
} }
if f.outputFormat == outputJSON {
buf.WriteByte('}') // for the whole line
}
return buf.String() return buf.String()
} }
@ -298,9 +339,16 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
if len(kvList)%2 != 0 { if len(kvList)%2 != 0 {
kvList = append(kvList, noValue) kvList = append(kvList, noValue)
} }
copied := false
for i := 0; i < len(kvList); i += 2 { for i := 0; i < len(kvList); i += 2 {
k, ok := kvList[i].(string) k, ok := kvList[i].(string)
if !ok { if !ok {
if !copied {
newList := make([]any, len(kvList))
copy(newList, kvList)
kvList = newList
copied = true
}
k = f.nonStringKey(kvList[i]) k = f.nonStringKey(kvList[i])
kvList[i] = k kvList[i] = k
} }
@ -308,7 +356,7 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
if i > 0 || continuing { if i > 0 || continuing {
if f.outputFormat == outputJSON { if f.outputFormat == outputJSON {
buf.WriteByte(',') buf.WriteByte(f.comma())
} else { } else {
// In theory the format could be something we don't understand. In // In theory the format could be something we don't understand. In
// practice, we control it, so it won't be. // practice, we control it, so it won't be.
@ -316,24 +364,35 @@ func (f Formatter) flatten(buf *bytes.Buffer, kvList []any, continuing bool, esc
} }
} }
if escapeKeys { buf.WriteString(f.quoted(k, escapeKeys))
buf.WriteString(prettyString(k)) buf.WriteByte(f.colon())
} else {
// this is faster
buf.WriteByte('"')
buf.WriteString(k)
buf.WriteByte('"')
}
if f.outputFormat == outputJSON {
buf.WriteByte(':')
} else {
buf.WriteByte('=')
}
buf.WriteString(f.pretty(v)) buf.WriteString(f.pretty(v))
} }
return kvList return kvList
} }
func (f Formatter) quoted(str string, escape bool) string {
if escape {
return prettyString(str)
}
// this is faster
return `"` + str + `"`
}
func (f Formatter) comma() byte {
if f.outputFormat == outputJSON {
return ','
}
return ' '
}
func (f Formatter) colon() byte {
if f.outputFormat == outputJSON {
return ':'
}
return '='
}
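For orientation (not part of the vendored diff), a minimal sketch of how the quoted/comma/colon helpers above surface to users of funcr: the text and JSON constructors pick different separators. The rendered lines in the comments are expected shapes, not captured output.

package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// Text output: '=' between key and value, ' ' between pairs.
	text := funcr.New(func(prefix, args string) { fmt.Println(args) }, funcr.Options{})
	text.Info("hello", "count", 1) // roughly: "level"=0 "msg"="hello" "count"=1

	// JSON output: ':' between key and value, ',' between pairs.
	js := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})
	js.Info("hello", "count", 1) // roughly: {"level":0,"msg":"hello","count":1}
}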
func (f Formatter) pretty(value any) string { func (f Formatter) pretty(value any) string {
return f.prettyWithFlags(value, 0, 0) return f.prettyWithFlags(value, 0, 0)
} }
@ -407,12 +466,12 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
} }
for i := 0; i < len(v); i += 2 { for i := 0; i < len(v); i += 2 {
if i > 0 { if i > 0 {
buf.WriteByte(',') buf.WriteByte(f.comma())
} }
k, _ := v[i].(string) // sanitize() above means no need to check success k, _ := v[i].(string) // sanitize() above means no need to check success
// arbitrary keys might need escaping // arbitrary keys might need escaping
buf.WriteString(prettyString(k)) buf.WriteString(prettyString(k))
buf.WriteByte(':') buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
} }
if flags&flagRawStruct == 0 { if flags&flagRawStruct == 0 {
@ -481,7 +540,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
continue continue
} }
if printComma { if printComma {
buf.WriteByte(',') buf.WriteByte(f.comma())
} }
printComma = true // if we got here, we are rendering a field printComma = true // if we got here, we are rendering a field
if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" {
@ -492,10 +551,8 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
name = fld.Name name = fld.Name
} }
// field names can't contain characters which need escaping // field names can't contain characters which need escaping
buf.WriteByte('"') buf.WriteString(f.quoted(name, false))
buf.WriteString(name) buf.WriteByte(f.colon())
buf.WriteByte('"')
buf.WriteByte(':')
buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1))
} }
if flags&flagRawStruct == 0 { if flags&flagRawStruct == 0 {
@ -520,7 +577,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
buf.WriteByte('[') buf.WriteByte('[')
for i := 0; i < v.Len(); i++ { for i := 0; i < v.Len(); i++ {
if i > 0 { if i > 0 {
buf.WriteByte(',') buf.WriteByte(f.comma())
} }
e := v.Index(i) e := v.Index(i)
buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1))
@ -534,7 +591,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
i := 0 i := 0
for it.Next() { for it.Next() {
if i > 0 { if i > 0 {
buf.WriteByte(',') buf.WriteByte(f.comma())
} }
// If a map key supports TextMarshaler, use it. // If a map key supports TextMarshaler, use it.
keystr := "" keystr := ""
@ -556,7 +613,7 @@ func (f Formatter) prettyWithFlags(value any, flags uint32, depth int) string {
} }
} }
buf.WriteString(keystr) buf.WriteString(keystr)
buf.WriteByte(':') buf.WriteByte(f.colon())
buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1))
i++ i++
} }
@ -706,6 +763,53 @@ func (f Formatter) sanitize(kvList []any) []any {
return kvList return kvList
} }
// startGroup opens a new group scope (basically a sub-struct), which locks all
// the current saved values and starts them anew. This is needed to satisfy
// slog.
func (f *Formatter) startGroup(group string) {
// Unnamed groups are just inlined.
if group == "" {
return
}
// Any saved values can no longer be changed.
buf := bytes.NewBuffer(make([]byte, 0, 1024))
continuing := false
if f.parentValuesStr != "" {
buf.WriteString(f.parentValuesStr)
continuing = true
}
if f.group != "" && f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.quoted(f.group, true)) // escape user-provided keys
buf.WriteByte(f.colon())
buf.WriteByte('{') // for the group
continuing = false
}
if f.valuesStr != "" {
if continuing {
buf.WriteByte(f.comma())
}
buf.WriteString(f.valuesStr)
}
// NOTE: We don't close the scope here - that's done later, when a log line
// is actually rendered (because we have N scopes to close).
f.parentValuesStr = buf.String()
// Start collecting new values.
f.group = group
f.groupDepth++
f.valuesStr = ""
f.values = nil
}
// Init configures this Formatter from runtime info, such as the call depth // Init configures this Formatter from runtime info, such as the call depth
// imposed by logr itself. // imposed by logr itself.
// Note that this receiver is a pointer, so depth can be saved. // Note that this receiver is a pointer, so depth can be saved.
@ -740,7 +844,10 @@ func (f Formatter) FormatInfo(level int, msg string, kvList []any) (prefix, args
if policy := f.opts.LogCaller; policy == All || policy == Info { if policy := f.opts.LogCaller; policy == All || policy == Info {
args = append(args, "caller", f.caller()) args = append(args, "caller", f.caller())
} }
args = append(args, "level", level, "msg", msg) if key := *f.opts.LogInfoLevel; key != "" {
args = append(args, key, level)
}
args = append(args, "msg", msg)
return prefix, f.render(args, kvList) return prefix, f.render(args, kvList)
} }
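A minimal sketch (not part of the diff) of the new LogInfoLevel option consumed by FormatInfo above; the key name "severity" is an arbitrary choice, and the rendered line is an expected shape rather than verified output.

package main

import (
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	key := "severity" // any name works; an explicit "" suppresses the level key entirely
	log := funcr.NewJSON(func(obj string) { fmt.Println(obj) },
		funcr.Options{LogInfoLevel: &key})
	log.Info("started", "port", 8080)
	// roughly: {"severity":0,"msg":"started","port":8080}
}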

vendor/github.com/go-logr/logr/funcr/slogsink.go generated vendored Normal file

@ -0,0 +1,105 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package funcr
import (
"context"
"log/slog"
"github.com/go-logr/logr"
)
var _ logr.SlogSink = &fnlogger{}
const extraSlogSinkDepth = 3 // 2 for slog, 1 for SlogSink
func (l fnlogger) Handle(_ context.Context, record slog.Record) error {
kvList := make([]any, 0, 2*record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
kvList = attrToKVs(attr, kvList)
return true
})
if record.Level >= slog.LevelError {
l.WithCallDepth(extraSlogSinkDepth).Error(nil, record.Message, kvList...)
} else {
level := l.levelFromSlog(record.Level)
l.WithCallDepth(extraSlogSinkDepth).Info(level, record.Message, kvList...)
}
return nil
}
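Sketch of the branch above that routes records at or above slog.LevelError to LogSink.Error with a nil error value; funcr JSON output is assumed and the commented line is approximate.

package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	log := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})
	sl := slog.New(logr.ToSlogHandler(log))
	sl.Error("disk full", "path", "/var")
	// handled via Error(nil, ...); roughly: {"msg":"disk full","error":null,"path":"/var"}
}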
func (l fnlogger) WithAttrs(attrs []slog.Attr) logr.SlogSink {
kvList := make([]any, 0, 2*len(attrs))
for _, attr := range attrs {
kvList = attrToKVs(attr, kvList)
}
l.AddValues(kvList)
return &l
}
func (l fnlogger) WithGroup(name string) logr.SlogSink {
l.startGroup(name)
return &l
}
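Sketch of WithGroup feeding startGroup through the slog bridge; the JSON shape shown is an expectation based on the rendering code earlier in this file, not verified output.

package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	log := funcr.NewJSON(func(obj string) { fmt.Println(obj) }, funcr.Options{})
	sl := slog.New(logr.ToSlogHandler(log))
	sl.WithGroup("req").Info("done", "method", "GET", "status", 200)
	// roughly: {"level":0,"msg":"done","req":{"method":"GET","status":200}}
}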
// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
// and other details of slog.
func attrToKVs(attr slog.Attr, kvList []any) []any {
attrVal := attr.Value.Resolve()
if attrVal.Kind() == slog.KindGroup {
groupVal := attrVal.Group()
grpKVs := make([]any, 0, 2*len(groupVal))
for _, attr := range groupVal {
grpKVs = attrToKVs(attr, grpKVs)
}
if attr.Key == "" {
// slog says we have to inline these
kvList = append(kvList, grpKVs...)
} else {
kvList = append(kvList, attr.Key, PseudoStruct(grpKVs))
}
} else if attr.Key != "" {
kvList = append(kvList, attr.Key, attrVal.Any())
}
return kvList
}
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
// It ensures that the result is >= 0. This is necessary because the result is
// passed to a LogSink and that API did not historically document whether
// levels could be negative or what that meant.
//
// Some example usage:
//
// logrV0 := getMyLogger()
// logrV2 := logrV0.V(2)
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
func (l fnlogger) levelFromSlog(level slog.Level) int {
result := -level
if result < 0 {
result = 0 // because LogSink doesn't expect negative V levels
}
return int(result)
}
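A runnable variant of the mapping described in the comment above, assuming funcr text output with Verbosity raised to 6 so none of the lines are filtered; the level values in the comments follow the table in that comment.

package main

import (
	"fmt"
	"log/slog"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

func main() {
	log := funcr.New(func(prefix, args string) { fmt.Println(args) },
		funcr.Options{Verbosity: 6})
	slogV2 := slog.New(logr.ToSlogHandler(log.V(2)))
	slogV2.Debug("msg") // rendered with "level"=6
	slogV2.Info("msg")  // rendered with "level"=2
	slogV2.Warn("msg")  // rendered with "level"=0
}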


@ -207,10 +207,6 @@ limitations under the License.
// those. // those.
package logr package logr
import (
"context"
)
// New returns a new Logger instance. This is primarily used by libraries // New returns a new Logger instance. This is primarily used by libraries
// implementing LogSink, rather than end users. Passing a nil sink will create // implementing LogSink, rather than end users. Passing a nil sink will create
// a Logger which discards all log lines. // a Logger which discards all log lines.
@ -410,45 +406,6 @@ func (l Logger) IsZero() bool {
return l.sink == nil return l.sink == nil
} }
// contextKey is how we find Loggers in a context.Context.
type contextKey struct{}
// FromContext returns a Logger from ctx or an error if no Logger is found.
func FromContext(ctx context.Context) (Logger, error) {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v, nil
}
return Logger{}, notFoundError{}
}
// notFoundError exists to carry an IsNotFound method.
type notFoundError struct{}
func (notFoundError) Error() string {
return "no logr.Logger was present"
}
func (notFoundError) IsNotFound() bool {
return true
}
// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this
// returns a Logger that discards all log messages.
func FromContextOrDiscard(ctx context.Context) Logger {
if v, ok := ctx.Value(contextKey{}).(Logger); ok {
return v
}
return Discard()
}
// NewContext returns a new Context, derived from ctx, which carries the
// provided Logger.
func NewContext(ctx context.Context, logger Logger) context.Context {
return context.WithValue(ctx, contextKey{}, logger)
}
// RuntimeInfo holds information that the logr "core" library knows which // RuntimeInfo holds information that the logr "core" library knows which
// LogSinks might want to know. // LogSinks might want to know.
type RuntimeInfo struct { type RuntimeInfo struct {

vendor/github.com/go-logr/logr/sloghandler.go generated vendored Normal file

@ -0,0 +1,192 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
)
type slogHandler struct {
// May be nil, in which case all logs get discarded.
sink LogSink
// Non-nil if sink is non-nil and implements SlogSink.
slogSink SlogSink
// groupPrefix collects values from WithGroup calls. It gets added as
// a prefix to value keys when handling a log record.
groupPrefix string
// levelBias can be set when constructing the handler to influence the
// slog.Level of log records. A positive levelBias reduces the
// slog.Level value. slog has no API to influence this value after the
// handler got created, so it can only be set indirectly through
// Logger.V.
levelBias slog.Level
}
var _ slog.Handler = &slogHandler{}
// groupSeparator is used to concatenate WithGroup names and attribute keys.
const groupSeparator = "."
// GetLevel is used for black box unit testing.
func (l *slogHandler) GetLevel() slog.Level {
return l.levelBias
}
func (l *slogHandler) Enabled(_ context.Context, level slog.Level) bool {
return l.sink != nil && (level >= slog.LevelError || l.sink.Enabled(l.levelFromSlog(level)))
}
func (l *slogHandler) Handle(ctx context.Context, record slog.Record) error {
if l.slogSink != nil {
// Only adjust verbosity level of log entries < slog.LevelError.
if record.Level < slog.LevelError {
record.Level -= l.levelBias
}
return l.slogSink.Handle(ctx, record)
}
// No need to check for nil sink here because Handle will only be called
// when Enabled returned true.
kvList := make([]any, 0, 2*record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
kvList = attrToKVs(attr, l.groupPrefix, kvList)
return true
})
if record.Level >= slog.LevelError {
l.sinkWithCallDepth().Error(nil, record.Message, kvList...)
} else {
level := l.levelFromSlog(record.Level)
l.sinkWithCallDepth().Info(level, record.Message, kvList...)
}
return nil
}
// sinkWithCallDepth adjusts the stack unwinding so that when Error or Info
// are called by Handle, code in slog gets skipped.
//
// This offset currently (Go 1.21.0) works for calls through
// slog.New(ToSlogHandler(...)). There's no guarantee that the call
// chain won't change. Wrapping the handler will also break unwinding. It's
// still better than not adjusting at all....
//
// This cannot be done when constructing the handler because FromSlogHandler needs
// access to the original sink without this adjustment. A second copy would
// work, but then WithAttrs would have to be called for both of them.
func (l *slogHandler) sinkWithCallDepth() LogSink {
if sink, ok := l.sink.(CallDepthLogSink); ok {
return sink.WithCallDepth(2)
}
return l.sink
}
func (l *slogHandler) WithAttrs(attrs []slog.Attr) slog.Handler {
if l.sink == nil || len(attrs) == 0 {
return l
}
clone := *l
if l.slogSink != nil {
clone.slogSink = l.slogSink.WithAttrs(attrs)
clone.sink = clone.slogSink
} else {
kvList := make([]any, 0, 2*len(attrs))
for _, attr := range attrs {
kvList = attrToKVs(attr, l.groupPrefix, kvList)
}
clone.sink = l.sink.WithValues(kvList...)
}
return &clone
}
func (l *slogHandler) WithGroup(name string) slog.Handler {
if l.sink == nil {
return l
}
if name == "" {
// slog says to inline empty groups
return l
}
clone := *l
if l.slogSink != nil {
clone.slogSink = l.slogSink.WithGroup(name)
clone.sink = clone.slogSink
} else {
clone.groupPrefix = addPrefix(clone.groupPrefix, name)
}
return &clone
}
// attrToKVs appends a slog.Attr to a logr-style kvList. It handles slog Groups
// and other details of slog.
func attrToKVs(attr slog.Attr, groupPrefix string, kvList []any) []any {
attrVal := attr.Value.Resolve()
if attrVal.Kind() == slog.KindGroup {
groupVal := attrVal.Group()
grpKVs := make([]any, 0, 2*len(groupVal))
prefix := groupPrefix
if attr.Key != "" {
prefix = addPrefix(groupPrefix, attr.Key)
}
for _, attr := range groupVal {
grpKVs = attrToKVs(attr, prefix, grpKVs)
}
kvList = append(kvList, grpKVs...)
} else if attr.Key != "" {
kvList = append(kvList, addPrefix(groupPrefix, attr.Key), attrVal.Any())
}
return kvList
}
func addPrefix(prefix, name string) string {
if prefix == "" {
return name
}
if name == "" {
return prefix
}
return prefix + groupSeparator + name
}
// levelFromSlog adjusts the level by the logger's verbosity and negates it.
// It ensures that the result is >= 0. This is necessary because the result is
// passed to a LogSink and that API did not historically document whether
// levels could be negative or what that meant.
//
// Some example usage:
//
// logrV0 := getMyLogger()
// logrV2 := logrV0.V(2)
// slogV2 := slog.New(logr.ToSlogHandler(logrV2))
// slogV2.Debug("msg") // =~ logrV2.V(4) =~ logrV0.V(6)
// slogV2.Info("msg") // =~ logrV2.V(0) =~ logrV0.V(2)
// slogV2.Warn("msg") // =~ logrV2.V(-4) =~ logrV0.V(0)
func (l *slogHandler) levelFromSlog(level slog.Level) int {
result := -level
result += l.levelBias // in case the original Logger had a V level
if result < 0 {
result = 0 // because LogSink doesn't expect negative V levels
}
return int(result)
}

vendor/github.com/go-logr/logr/slogr.go generated vendored Normal file

@ -0,0 +1,100 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
)
// FromSlogHandler returns a Logger which writes to the slog.Handler.
//
// The logr verbosity level is mapped to slog levels such that V(0) becomes
// slog.LevelInfo and V(4) becomes slog.LevelDebug.
func FromSlogHandler(handler slog.Handler) Logger {
if handler, ok := handler.(*slogHandler); ok {
if handler.sink == nil {
return Discard()
}
return New(handler.sink).V(int(handler.levelBias))
}
return New(&slogSink{handler: handler})
}
// ToSlogHandler returns a slog.Handler which writes to the same sink as the Logger.
//
// The returned handler writes all records with level >= slog.LevelError as
// error log entries with LogSink.Error, regardless of the verbosity level of
// the Logger:
//
// logger := <some Logger with 0 as verbosity level>
// slog.New(ToSlogHandler(logger.V(10))).Error(...) -> logSink.Error(...)
//
// The level of all other records gets reduced by the verbosity
// level of the Logger and the result is negated. If it happens
// to be negative, then it gets replaced by zero because a LogSink
// is not expected to handle negative levels:
//
// slog.New(ToSlogHandler(logger)).Debug(...) -> logger.GetSink().Info(level=4, ...)
// slog.New(ToSlogHandler(logger)).Warning(...) -> logger.GetSink().Info(level=0, ...)
// slog.New(ToSlogHandler(logger)).Info(...) -> logger.GetSink().Info(level=0, ...)
// slog.New(ToSlogHandler(logger.V(4))).Info(...) -> logger.GetSink().Info(level=4, ...)
func ToSlogHandler(logger Logger) slog.Handler {
if sink, ok := logger.GetSink().(*slogSink); ok && logger.GetV() == 0 {
return sink.handler
}
handler := &slogHandler{sink: logger.GetSink(), levelBias: slog.Level(logger.GetV())}
if slogSink, ok := handler.sink.(SlogSink); ok {
handler.slogSink = slogSink
}
return handler
}
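Sketch of the two bridge functions against a standard library slog handler; only the APIs introduced in this diff plus log/slog are assumed.

package main

import (
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	base := slog.NewJSONHandler(os.Stderr, nil)

	lg := logr.FromSlogHandler(base) // logr.Logger backed by slog
	lg.Info("hello", "user", "bob")  // arrives at slog.LevelInfo
	lg.V(4).Info("detail")           // maps to slog.LevelDebug; filtered at the handler's default level

	h := logr.ToSlogHandler(lg) // back to a slog.Handler writing to the same sink
	slog.New(h).Warn("careful")
}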
// SlogSink is an optional interface that a LogSink can implement to support
// logging through the slog.Logger or slog.Handler APIs better. It then should
// also support special slog values like slog.Group. When used as a
// slog.Handler, the advantages are:
//
// - stack unwinding gets avoided in favor of logging the pre-recorded PC,
// as intended by slog
// - proper grouping of key/value pairs via WithGroup
// - verbosity levels > slog.LevelInfo can be recorded
// - less overhead
//
// Both APIs (Logger and slog.Logger/Handler) then are supported equally
// well. Developers can pick whatever API suits them better and/or mix
// packages which use either API in the same binary with a common logging
// implementation.
//
// This interface is necessary because the type implementing the LogSink
// interface cannot also implement the slog.Handler interface due to the
// different prototype of the common Enabled method.
//
// An implementation could support both interfaces in two different types, but then
// additional interfaces would be needed to convert between those types in FromSlogHandler
// and ToSlogHandler.
type SlogSink interface {
LogSink
Handle(ctx context.Context, record slog.Record) error
WithAttrs(attrs []slog.Attr) SlogSink
WithGroup(name string) SlogSink
}

vendor/github.com/go-logr/logr/slogsink.go generated vendored Normal file

@ -0,0 +1,120 @@
//go:build go1.21
// +build go1.21
/*
Copyright 2023 The logr Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package logr
import (
"context"
"log/slog"
"runtime"
"time"
)
var (
_ LogSink = &slogSink{}
_ CallDepthLogSink = &slogSink{}
_ Underlier = &slogSink{}
)
// Underlier is implemented by the LogSink returned by FromSlogHandler.
type Underlier interface {
// GetUnderlying returns the Handler used by the LogSink.
GetUnderlying() slog.Handler
}
const (
// nameKey is used to log the `WithName` values as an additional attribute.
nameKey = "logger"
// errKey is used to log the error parameter of Error as an additional attribute.
errKey = "err"
)
type slogSink struct {
callDepth int
name string
handler slog.Handler
}
func (l *slogSink) Init(info RuntimeInfo) {
l.callDepth = info.CallDepth
}
func (l *slogSink) GetUnderlying() slog.Handler {
return l.handler
}
func (l *slogSink) WithCallDepth(depth int) LogSink {
newLogger := *l
newLogger.callDepth += depth
return &newLogger
}
func (l *slogSink) Enabled(level int) bool {
return l.handler.Enabled(context.Background(), slog.Level(-level))
}
func (l *slogSink) Info(level int, msg string, kvList ...interface{}) {
l.log(nil, msg, slog.Level(-level), kvList...)
}
func (l *slogSink) Error(err error, msg string, kvList ...interface{}) {
l.log(err, msg, slog.LevelError, kvList...)
}
func (l *slogSink) log(err error, msg string, level slog.Level, kvList ...interface{}) {
var pcs [1]uintptr
// skip runtime.Callers, this function, Info/Error, and all helper functions above that.
runtime.Callers(3+l.callDepth, pcs[:])
record := slog.NewRecord(time.Now(), level, msg, pcs[0])
if l.name != "" {
record.AddAttrs(slog.String(nameKey, l.name))
}
if err != nil {
record.AddAttrs(slog.Any(errKey, err))
}
record.Add(kvList...)
_ = l.handler.Handle(context.Background(), record)
}
func (l slogSink) WithName(name string) LogSink {
if l.name != "" {
l.name += "/"
}
l.name += name
return &l
}
func (l slogSink) WithValues(kvList ...interface{}) LogSink {
l.handler = l.handler.WithAttrs(kvListToAttrs(kvList...))
return &l
}
func kvListToAttrs(kvList ...interface{}) []slog.Attr {
// We don't need the record itself, only its Add method.
record := slog.NewRecord(time.Time{}, 0, "", 0)
record.Add(kvList...)
attrs := make([]slog.Attr, 0, record.NumAttrs())
record.Attrs(func(attr slog.Attr) bool {
attrs = append(attrs, attr)
return true
})
return attrs
}
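Sketch of how slogSink surfaces WithName and Error through the nameKey/errKey attributes above; the commented output line is approximate.

package main

import (
	"errors"
	"log/slog"
	"os"

	"github.com/go-logr/logr"
)

func main() {
	lg := logr.FromSlogHandler(slog.NewTextHandler(os.Stdout, nil))
	lg.WithName("db").Error(errors.New("timeout"), "query failed", "table", "users")
	// roughly: time=... level=ERROR msg="query failed" logger=db err=timeout table=users
}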


@ -1,530 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jsonpb
import (
"encoding/json"
"errors"
"fmt"
"io"
"math"
"reflect"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/encoding/protojson"
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const wrapJSONUnmarshalV2 = false
// UnmarshalNext unmarshals the next JSON object from d into m.
func UnmarshalNext(d *json.Decoder, m proto.Message) error {
return new(Unmarshaler).UnmarshalNext(d, m)
}
// Unmarshal unmarshals a JSON object from r into m.
func Unmarshal(r io.Reader, m proto.Message) error {
return new(Unmarshaler).Unmarshal(r, m)
}
// UnmarshalString unmarshals a JSON object from s into m.
func UnmarshalString(s string, m proto.Message) error {
return new(Unmarshaler).Unmarshal(strings.NewReader(s), m)
}
// Unmarshaler is a configurable object for converting from a JSON
// representation to a protocol buffer object.
type Unmarshaler struct {
// AllowUnknownFields specifies whether to allow messages to contain
// unknown JSON fields, as opposed to failing to unmarshal.
AllowUnknownFields bool
// AnyResolver is used to resolve the google.protobuf.Any well-known type.
// If unset, the global registry is used by default.
AnyResolver AnyResolver
}
// JSONPBUnmarshaler is implemented by protobuf messages that customize the way
// they are unmarshaled from JSON. Messages that implement this should also
// implement JSONPBMarshaler so that the custom format can be produced.
//
// The JSON unmarshaling must follow the JSON to proto specification:
// https://developers.google.com/protocol-buffers/docs/proto3#json
//
// Deprecated: Custom types should implement protobuf reflection instead.
type JSONPBUnmarshaler interface {
UnmarshalJSONPB(*Unmarshaler, []byte) error
}
// Unmarshal unmarshals a JSON object from r into m.
func (u *Unmarshaler) Unmarshal(r io.Reader, m proto.Message) error {
return u.UnmarshalNext(json.NewDecoder(r), m)
}
// UnmarshalNext unmarshals the next JSON object from d into m.
func (u *Unmarshaler) UnmarshalNext(d *json.Decoder, m proto.Message) error {
if m == nil {
return errors.New("invalid nil message")
}
// Parse the next JSON object from the stream.
raw := json.RawMessage{}
if err := d.Decode(&raw); err != nil {
return err
}
// Check for custom unmarshalers first since they may not properly
// implement protobuf reflection that the logic below relies on.
if jsu, ok := m.(JSONPBUnmarshaler); ok {
return jsu.UnmarshalJSONPB(u, raw)
}
mr := proto.MessageReflect(m)
// NOTE: For historical reasons, a top-level null is treated as a noop.
// This is incorrect, but kept for compatibility.
if string(raw) == "null" && mr.Descriptor().FullName() != "google.protobuf.Value" {
return nil
}
if wrapJSONUnmarshalV2 {
// NOTE: If input message is non-empty, we need to preserve merge semantics
// of the old jsonpb implementation. These semantics are not supported by
// the protobuf JSON specification.
isEmpty := true
mr.Range(func(protoreflect.FieldDescriptor, protoreflect.Value) bool {
isEmpty = false // at least one iteration implies non-empty
return false
})
if !isEmpty {
// Perform unmarshaling into a newly allocated, empty message.
mr = mr.New()
// Use a defer to copy all unmarshaled fields into the original message.
dst := proto.MessageReflect(m)
defer mr.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
dst.Set(fd, v)
return true
})
}
// Unmarshal using the v2 JSON unmarshaler.
opts := protojson.UnmarshalOptions{
DiscardUnknown: u.AllowUnknownFields,
}
if u.AnyResolver != nil {
opts.Resolver = anyResolver{u.AnyResolver}
}
return opts.Unmarshal(raw, mr.Interface())
} else {
if err := u.unmarshalMessage(mr, raw); err != nil {
return err
}
return protoV2.CheckInitialized(mr.Interface())
}
}
func (u *Unmarshaler) unmarshalMessage(m protoreflect.Message, in []byte) error {
md := m.Descriptor()
fds := md.Fields()
if jsu, ok := proto.MessageV1(m.Interface()).(JSONPBUnmarshaler); ok {
return jsu.UnmarshalJSONPB(u, in)
}
if string(in) == "null" && md.FullName() != "google.protobuf.Value" {
return nil
}
switch wellKnownType(md.FullName()) {
case "Any":
var jsonObject map[string]json.RawMessage
if err := json.Unmarshal(in, &jsonObject); err != nil {
return err
}
rawTypeURL, ok := jsonObject["@type"]
if !ok {
return errors.New("Any JSON doesn't have '@type'")
}
typeURL, err := unquoteString(string(rawTypeURL))
if err != nil {
return fmt.Errorf("can't unmarshal Any's '@type': %q", rawTypeURL)
}
m.Set(fds.ByNumber(1), protoreflect.ValueOfString(typeURL))
var m2 protoreflect.Message
if u.AnyResolver != nil {
mi, err := u.AnyResolver.Resolve(typeURL)
if err != nil {
return err
}
m2 = proto.MessageReflect(mi)
} else {
mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
if err != nil {
if err == protoregistry.NotFound {
return fmt.Errorf("could not resolve Any message type: %v", typeURL)
}
return err
}
m2 = mt.New()
}
if wellKnownType(m2.Descriptor().FullName()) != "" {
rawValue, ok := jsonObject["value"]
if !ok {
return errors.New("Any JSON doesn't have 'value'")
}
if err := u.unmarshalMessage(m2, rawValue); err != nil {
return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
}
} else {
delete(jsonObject, "@type")
rawJSON, err := json.Marshal(jsonObject)
if err != nil {
return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", err)
}
if err = u.unmarshalMessage(m2, rawJSON); err != nil {
return fmt.Errorf("can't unmarshal Any nested proto %v: %v", typeURL, err)
}
}
rawWire, err := protoV2.Marshal(m2.Interface())
if err != nil {
return fmt.Errorf("can't marshal proto %v into Any.Value: %v", typeURL, err)
}
m.Set(fds.ByNumber(2), protoreflect.ValueOfBytes(rawWire))
return nil
case "BoolValue", "BytesValue", "StringValue",
"Int32Value", "UInt32Value", "FloatValue",
"Int64Value", "UInt64Value", "DoubleValue":
fd := fds.ByNumber(1)
v, err := u.unmarshalValue(m.NewField(fd), in, fd)
if err != nil {
return err
}
m.Set(fd, v)
return nil
case "Duration":
v, err := unquoteString(string(in))
if err != nil {
return err
}
d, err := time.ParseDuration(v)
if err != nil {
return fmt.Errorf("bad Duration: %v", err)
}
sec := d.Nanoseconds() / 1e9
nsec := d.Nanoseconds() % 1e9
m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
return nil
case "Timestamp":
v, err := unquoteString(string(in))
if err != nil {
return err
}
t, err := time.Parse(time.RFC3339Nano, v)
if err != nil {
return fmt.Errorf("bad Timestamp: %v", err)
}
sec := t.Unix()
nsec := t.Nanosecond()
m.Set(fds.ByNumber(1), protoreflect.ValueOfInt64(int64(sec)))
m.Set(fds.ByNumber(2), protoreflect.ValueOfInt32(int32(nsec)))
return nil
case "Value":
switch {
case string(in) == "null":
m.Set(fds.ByNumber(1), protoreflect.ValueOfEnum(0))
case string(in) == "true":
m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(true))
case string(in) == "false":
m.Set(fds.ByNumber(4), protoreflect.ValueOfBool(false))
case hasPrefixAndSuffix('"', in, '"'):
s, err := unquoteString(string(in))
if err != nil {
return fmt.Errorf("unrecognized type for Value %q", in)
}
m.Set(fds.ByNumber(3), protoreflect.ValueOfString(s))
case hasPrefixAndSuffix('[', in, ']'):
v := m.Mutable(fds.ByNumber(6))
return u.unmarshalMessage(v.Message(), in)
case hasPrefixAndSuffix('{', in, '}'):
v := m.Mutable(fds.ByNumber(5))
return u.unmarshalMessage(v.Message(), in)
default:
f, err := strconv.ParseFloat(string(in), 0)
if err != nil {
return fmt.Errorf("unrecognized type for Value %q", in)
}
m.Set(fds.ByNumber(2), protoreflect.ValueOfFloat64(f))
}
return nil
case "ListValue":
var jsonArray []json.RawMessage
if err := json.Unmarshal(in, &jsonArray); err != nil {
return fmt.Errorf("bad ListValue: %v", err)
}
lv := m.Mutable(fds.ByNumber(1)).List()
for _, raw := range jsonArray {
ve := lv.NewElement()
if err := u.unmarshalMessage(ve.Message(), raw); err != nil {
return err
}
lv.Append(ve)
}
return nil
case "Struct":
var jsonObject map[string]json.RawMessage
if err := json.Unmarshal(in, &jsonObject); err != nil {
return fmt.Errorf("bad StructValue: %v", err)
}
mv := m.Mutable(fds.ByNumber(1)).Map()
for key, raw := range jsonObject {
kv := protoreflect.ValueOf(key).MapKey()
vv := mv.NewValue()
if err := u.unmarshalMessage(vv.Message(), raw); err != nil {
return fmt.Errorf("bad value in StructValue for key %q: %v", key, err)
}
mv.Set(kv, vv)
}
return nil
}
var jsonObject map[string]json.RawMessage
if err := json.Unmarshal(in, &jsonObject); err != nil {
return err
}
// Handle known fields.
for i := 0; i < fds.Len(); i++ {
fd := fds.Get(i)
if fd.IsWeak() && fd.Message().IsPlaceholder() {
continue // weak reference is not linked in
}
// Search for any raw JSON value associated with this field.
var raw json.RawMessage
name := string(fd.Name())
if fd.Kind() == protoreflect.GroupKind {
name = string(fd.Message().Name())
}
if v, ok := jsonObject[name]; ok {
delete(jsonObject, name)
raw = v
}
name = string(fd.JSONName())
if v, ok := jsonObject[name]; ok {
delete(jsonObject, name)
raw = v
}
field := m.NewField(fd)
// Unmarshal the field value.
if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
continue
}
v, err := u.unmarshalValue(field, raw, fd)
if err != nil {
return err
}
m.Set(fd, v)
}
// Handle extension fields.
for name, raw := range jsonObject {
if !strings.HasPrefix(name, "[") || !strings.HasSuffix(name, "]") {
continue
}
// Resolve the extension field by name.
xname := protoreflect.FullName(name[len("[") : len(name)-len("]")])
xt, _ := protoregistry.GlobalTypes.FindExtensionByName(xname)
if xt == nil && isMessageSet(md) {
xt, _ = protoregistry.GlobalTypes.FindExtensionByName(xname.Append("message_set_extension"))
}
if xt == nil {
continue
}
delete(jsonObject, name)
fd := xt.TypeDescriptor()
if fd.ContainingMessage().FullName() != m.Descriptor().FullName() {
return fmt.Errorf("extension field %q does not extend message %q", xname, m.Descriptor().FullName())
}
field := m.NewField(fd)
// Unmarshal the field value.
if raw == nil || (string(raw) == "null" && !isSingularWellKnownValue(fd) && !isSingularJSONPBUnmarshaler(field, fd)) {
continue
}
v, err := u.unmarshalValue(field, raw, fd)
if err != nil {
return err
}
m.Set(fd, v)
}
if !u.AllowUnknownFields && len(jsonObject) > 0 {
for name := range jsonObject {
return fmt.Errorf("unknown field %q in %v", name, md.FullName())
}
}
return nil
}
func isSingularWellKnownValue(fd protoreflect.FieldDescriptor) bool {
if fd.Cardinality() == protoreflect.Repeated {
return false
}
if md := fd.Message(); md != nil {
return md.FullName() == "google.protobuf.Value"
}
if ed := fd.Enum(); ed != nil {
return ed.FullName() == "google.protobuf.NullValue"
}
return false
}
func isSingularJSONPBUnmarshaler(v protoreflect.Value, fd protoreflect.FieldDescriptor) bool {
if fd.Message() != nil && fd.Cardinality() != protoreflect.Repeated {
_, ok := proto.MessageV1(v.Interface()).(JSONPBUnmarshaler)
return ok
}
return false
}
func (u *Unmarshaler) unmarshalValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
switch {
case fd.IsList():
var jsonArray []json.RawMessage
if err := json.Unmarshal(in, &jsonArray); err != nil {
return v, err
}
lv := v.List()
for _, raw := range jsonArray {
ve, err := u.unmarshalSingularValue(lv.NewElement(), raw, fd)
if err != nil {
return v, err
}
lv.Append(ve)
}
return v, nil
case fd.IsMap():
var jsonObject map[string]json.RawMessage
if err := json.Unmarshal(in, &jsonObject); err != nil {
return v, err
}
kfd := fd.MapKey()
vfd := fd.MapValue()
mv := v.Map()
for key, raw := range jsonObject {
var kv protoreflect.MapKey
if kfd.Kind() == protoreflect.StringKind {
kv = protoreflect.ValueOf(key).MapKey()
} else {
v, err := u.unmarshalSingularValue(kfd.Default(), []byte(key), kfd)
if err != nil {
return v, err
}
kv = v.MapKey()
}
vv, err := u.unmarshalSingularValue(mv.NewValue(), raw, vfd)
if err != nil {
return v, err
}
mv.Set(kv, vv)
}
return v, nil
default:
return u.unmarshalSingularValue(v, in, fd)
}
}
var nonFinite = map[string]float64{
`"NaN"`: math.NaN(),
`"Infinity"`: math.Inf(+1),
`"-Infinity"`: math.Inf(-1),
}
func (u *Unmarshaler) unmarshalSingularValue(v protoreflect.Value, in []byte, fd protoreflect.FieldDescriptor) (protoreflect.Value, error) {
switch fd.Kind() {
case protoreflect.BoolKind:
return unmarshalValue(in, new(bool))
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
return unmarshalValue(trimQuote(in), new(int32))
case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
return unmarshalValue(trimQuote(in), new(int64))
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
return unmarshalValue(trimQuote(in), new(uint32))
case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
return unmarshalValue(trimQuote(in), new(uint64))
case protoreflect.FloatKind:
if f, ok := nonFinite[string(in)]; ok {
return protoreflect.ValueOfFloat32(float32(f)), nil
}
return unmarshalValue(trimQuote(in), new(float32))
case protoreflect.DoubleKind:
if f, ok := nonFinite[string(in)]; ok {
return protoreflect.ValueOfFloat64(float64(f)), nil
}
return unmarshalValue(trimQuote(in), new(float64))
case protoreflect.StringKind:
return unmarshalValue(in, new(string))
case protoreflect.BytesKind:
return unmarshalValue(in, new([]byte))
case protoreflect.EnumKind:
if hasPrefixAndSuffix('"', in, '"') {
vd := fd.Enum().Values().ByName(protoreflect.Name(trimQuote(in)))
if vd == nil {
return v, fmt.Errorf("unknown value %q for enum %s", in, fd.Enum().FullName())
}
return protoreflect.ValueOfEnum(vd.Number()), nil
}
return unmarshalValue(in, new(protoreflect.EnumNumber))
case protoreflect.MessageKind, protoreflect.GroupKind:
err := u.unmarshalMessage(v.Message(), in)
return v, err
default:
panic(fmt.Sprintf("invalid kind %v", fd.Kind()))
}
}
func unmarshalValue(in []byte, v interface{}) (protoreflect.Value, error) {
err := json.Unmarshal(in, v)
return protoreflect.ValueOf(reflect.ValueOf(v).Elem().Interface()), err
}
func unquoteString(in string) (out string, err error) {
err = json.Unmarshal([]byte(in), &out)
return out, err
}
func hasPrefixAndSuffix(prefix byte, in []byte, suffix byte) bool {
if len(in) >= 2 && in[0] == prefix && in[len(in)-1] == suffix {
return true
}
return false
}
// trimQuote is like unquoteString but simply strips surrounding quotes.
// This is incorrect, but is behavior done by the legacy implementation.
func trimQuote(in []byte) []byte {
if len(in) >= 2 && in[0] == '"' && in[len(in)-1] == '"' {
in = in[1 : len(in)-1]
}
return in
}


@ -1,559 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package jsonpb
import (
"encoding/json"
"errors"
"fmt"
"io"
"math"
"reflect"
"sort"
"strconv"
"strings"
"time"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/encoding/protojson"
protoV2 "google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
)
const wrapJSONMarshalV2 = false
// Marshaler is a configurable object for marshaling protocol buffer messages
// to the specified JSON representation.
type Marshaler struct {
// OrigName specifies whether to use the original protobuf name for fields.
OrigName bool
// EnumsAsInts specifies whether to render enum values as integers,
// as opposed to string values.
EnumsAsInts bool
// EmitDefaults specifies whether to render fields with zero values.
EmitDefaults bool
// Indent controls whether the output is compact or not.
// If empty, the output is compact JSON. Otherwise, every JSON object
// entry and JSON array value will be on its own line.
// Each line will be preceded by repeated copies of Indent, where the
// number of copies is the current indentation depth.
Indent string
// AnyResolver is used to resolve the google.protobuf.Any well-known type.
// If unset, the global registry is used by default.
AnyResolver AnyResolver
}
// JSONPBMarshaler is implemented by protobuf messages that customize the
// way they are marshaled to JSON. Messages that implement this should also
// implement JSONPBUnmarshaler so that the custom format can be parsed.
//
// The JSON marshaling must follow the proto to JSON specification:
// https://developers.google.com/protocol-buffers/docs/proto3#json
//
// Deprecated: Custom types should implement protobuf reflection instead.
type JSONPBMarshaler interface {
MarshalJSONPB(*Marshaler) ([]byte, error)
}
// Marshal serializes a protobuf message as JSON into w.
func (jm *Marshaler) Marshal(w io.Writer, m proto.Message) error {
b, err := jm.marshal(m)
if len(b) > 0 {
if _, err := w.Write(b); err != nil {
return err
}
}
return err
}
// MarshalToString serializes a protobuf message as JSON in string form.
func (jm *Marshaler) MarshalToString(m proto.Message) (string, error) {
b, err := jm.marshal(m)
if err != nil {
return "", err
}
return string(b), nil
}
func (jm *Marshaler) marshal(m proto.Message) ([]byte, error) {
v := reflect.ValueOf(m)
if m == nil || (v.Kind() == reflect.Ptr && v.IsNil()) {
return nil, errors.New("Marshal called with nil")
}
// Check for custom marshalers first since they may not properly
// implement protobuf reflection that the logic below relies on.
if jsm, ok := m.(JSONPBMarshaler); ok {
return jsm.MarshalJSONPB(jm)
}
if wrapJSONMarshalV2 {
opts := protojson.MarshalOptions{
UseProtoNames: jm.OrigName,
UseEnumNumbers: jm.EnumsAsInts,
EmitUnpopulated: jm.EmitDefaults,
Indent: jm.Indent,
}
if jm.AnyResolver != nil {
opts.Resolver = anyResolver{jm.AnyResolver}
}
return opts.Marshal(proto.MessageReflect(m).Interface())
} else {
// Check for unpopulated required fields first.
m2 := proto.MessageReflect(m)
if err := protoV2.CheckInitialized(m2.Interface()); err != nil {
return nil, err
}
w := jsonWriter{Marshaler: jm}
err := w.marshalMessage(m2, "", "")
return w.buf, err
}
}
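For reference, the option mapping from the wrapJSONMarshalV2 branch above written against protojson directly; structpb is used only as a convenient stand-in for an application message type.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	opts := protojson.MarshalOptions{
		UseProtoNames:   true,  // jsonpb Marshaler.OrigName
		UseEnumNumbers:  false, // jsonpb Marshaler.EnumsAsInts
		EmitUnpopulated: true,  // jsonpb Marshaler.EmitDefaults
		Indent:          "  ",  // jsonpb Marshaler.Indent
	}
	msg, err := structpb.NewStruct(map[string]interface{}{"name": "demo"})
	if err != nil {
		panic(err)
	}
	b, err := opts.Marshal(msg)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}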
type jsonWriter struct {
*Marshaler
buf []byte
}
func (w *jsonWriter) write(s string) {
w.buf = append(w.buf, s...)
}
func (w *jsonWriter) marshalMessage(m protoreflect.Message, indent, typeURL string) error {
if jsm, ok := proto.MessageV1(m.Interface()).(JSONPBMarshaler); ok {
b, err := jsm.MarshalJSONPB(w.Marshaler)
if err != nil {
return err
}
if typeURL != "" {
// we are marshaling this object to an Any type
var js map[string]*json.RawMessage
if err = json.Unmarshal(b, &js); err != nil {
return fmt.Errorf("type %T produced invalid JSON: %v", m.Interface(), err)
}
turl, err := json.Marshal(typeURL)
if err != nil {
return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err)
}
js["@type"] = (*json.RawMessage)(&turl)
if b, err = json.Marshal(js); err != nil {
return err
}
}
w.write(string(b))
return nil
}
md := m.Descriptor()
fds := md.Fields()
// Handle well-known types.
const secondInNanos = int64(time.Second / time.Nanosecond)
switch wellKnownType(md.FullName()) {
case "Any":
return w.marshalAny(m, indent)
case "BoolValue", "BytesValue", "StringValue",
"Int32Value", "UInt32Value", "FloatValue",
"Int64Value", "UInt64Value", "DoubleValue":
fd := fds.ByNumber(1)
return w.marshalValue(fd, m.Get(fd), indent)
case "Duration":
const maxSecondsInDuration = 315576000000
// "Generated output always contains 0, 3, 6, or 9 fractional digits,
// depending on required precision."
s := m.Get(fds.ByNumber(1)).Int()
ns := m.Get(fds.ByNumber(2)).Int()
if s < -maxSecondsInDuration || s > maxSecondsInDuration {
return fmt.Errorf("seconds out of range %v", s)
}
if ns <= -secondInNanos || ns >= secondInNanos {
return fmt.Errorf("ns out of range (%v, %v)", -secondInNanos, secondInNanos)
}
if (s > 0 && ns < 0) || (s < 0 && ns > 0) {
return errors.New("signs of seconds and nanos do not match")
}
var sign string
if s < 0 || ns < 0 {
sign, s, ns = "-", -1*s, -1*ns
}
x := fmt.Sprintf("%s%d.%09d", sign, s, ns)
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, ".000")
w.write(fmt.Sprintf(`"%vs"`, x))
return nil
case "Timestamp":
// "RFC 3339, where generated output will always be Z-normalized
// and uses 0, 3, 6 or 9 fractional digits."
s := m.Get(fds.ByNumber(1)).Int()
ns := m.Get(fds.ByNumber(2)).Int()
if ns < 0 || ns >= secondInNanos {
return fmt.Errorf("ns out of range [0, %v)", secondInNanos)
}
t := time.Unix(s, ns).UTC()
// time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits).
x := t.Format("2006-01-02T15:04:05.000000000")
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, "000")
x = strings.TrimSuffix(x, ".000")
w.write(fmt.Sprintf(`"%vZ"`, x))
return nil
case "Value":
// JSON value; which is a null, number, string, bool, object, or array.
od := md.Oneofs().Get(0)
fd := m.WhichOneof(od)
if fd == nil {
return errors.New("nil Value")
}
return w.marshalValue(fd, m.Get(fd), indent)
case "Struct", "ListValue":
// JSON object or array.
fd := fds.ByNumber(1)
return w.marshalValue(fd, m.Get(fd), indent)
}
w.write("{")
if w.Indent != "" {
w.write("\n")
}
firstField := true
if typeURL != "" {
if err := w.marshalTypeURL(indent, typeURL); err != nil {
return err
}
firstField = false
}
for i := 0; i < fds.Len(); {
fd := fds.Get(i)
if od := fd.ContainingOneof(); od != nil {
fd = m.WhichOneof(od)
i += od.Fields().Len()
if fd == nil {
continue
}
} else {
i++
}
v := m.Get(fd)
if !m.Has(fd) {
if !w.EmitDefaults || fd.ContainingOneof() != nil {
continue
}
if fd.Cardinality() != protoreflect.Repeated && (fd.Message() != nil || fd.Syntax() == protoreflect.Proto2) {
v = protoreflect.Value{} // use "null" for singular messages or proto2 scalars
}
}
if !firstField {
w.writeComma()
}
if err := w.marshalField(fd, v, indent); err != nil {
return err
}
firstField = false
}
// Handle proto2 extensions.
if md.ExtensionRanges().Len() > 0 {
// Collect a sorted list of all extension descriptor and values.
type ext struct {
desc protoreflect.FieldDescriptor
val protoreflect.Value
}
var exts []ext
m.Range(func(fd protoreflect.FieldDescriptor, v protoreflect.Value) bool {
if fd.IsExtension() {
exts = append(exts, ext{fd, v})
}
return true
})
sort.Slice(exts, func(i, j int) bool {
return exts[i].desc.Number() < exts[j].desc.Number()
})
for _, ext := range exts {
if !firstField {
w.writeComma()
}
if err := w.marshalField(ext.desc, ext.val, indent); err != nil {
return err
}
firstField = false
}
}
if w.Indent != "" {
w.write("\n")
w.write(indent)
}
w.write("}")
return nil
}
func (w *jsonWriter) writeComma() {
if w.Indent != "" {
w.write(",\n")
} else {
w.write(",")
}
}
func (w *jsonWriter) marshalAny(m protoreflect.Message, indent string) error {
// "If the Any contains a value that has a special JSON mapping,
// it will be converted as follows: {"@type": xxx, "value": yyy}.
// Otherwise, the value will be converted into a JSON object,
// and the "@type" field will be inserted to indicate the actual data type."
md := m.Descriptor()
typeURL := m.Get(md.Fields().ByNumber(1)).String()
rawVal := m.Get(md.Fields().ByNumber(2)).Bytes()
var m2 protoreflect.Message
if w.AnyResolver != nil {
mi, err := w.AnyResolver.Resolve(typeURL)
if err != nil {
return err
}
m2 = proto.MessageReflect(mi)
} else {
mt, err := protoregistry.GlobalTypes.FindMessageByURL(typeURL)
if err != nil {
return err
}
m2 = mt.New()
}
if err := protoV2.Unmarshal(rawVal, m2.Interface()); err != nil {
return err
}
if wellKnownType(m2.Descriptor().FullName()) == "" {
return w.marshalMessage(m2, indent, typeURL)
}
w.write("{")
if w.Indent != "" {
w.write("\n")
}
if err := w.marshalTypeURL(indent, typeURL); err != nil {
return err
}
w.writeComma()
if w.Indent != "" {
w.write(indent)
w.write(w.Indent)
w.write(`"value": `)
} else {
w.write(`"value":`)
}
if err := w.marshalMessage(m2, indent+w.Indent, ""); err != nil {
return err
}
if w.Indent != "" {
w.write("\n")
w.write(indent)
}
w.write("}")
return nil
}
func (w *jsonWriter) marshalTypeURL(indent, typeURL string) error {
if w.Indent != "" {
w.write(indent)
w.write(w.Indent)
}
w.write(`"@type":`)
if w.Indent != "" {
w.write(" ")
}
b, err := json.Marshal(typeURL)
if err != nil {
return err
}
w.write(string(b))
return nil
}
// marshalField writes field description and value to the Writer.
func (w *jsonWriter) marshalField(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
if w.Indent != "" {
w.write(indent)
w.write(w.Indent)
}
w.write(`"`)
switch {
case fd.IsExtension():
// For message set, use the name of the message as the extension name.
name := string(fd.FullName())
if isMessageSet(fd.ContainingMessage()) {
name = strings.TrimSuffix(name, ".message_set_extension")
}
w.write("[" + name + "]")
case w.OrigName:
name := string(fd.Name())
if fd.Kind() == protoreflect.GroupKind {
name = string(fd.Message().Name())
}
w.write(name)
default:
w.write(string(fd.JSONName()))
}
w.write(`":`)
if w.Indent != "" {
w.write(" ")
}
return w.marshalValue(fd, v, indent)
}
func (w *jsonWriter) marshalValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
switch {
case fd.IsList():
w.write("[")
comma := ""
lv := v.List()
for i := 0; i < lv.Len(); i++ {
w.write(comma)
if w.Indent != "" {
w.write("\n")
w.write(indent)
w.write(w.Indent)
w.write(w.Indent)
}
if err := w.marshalSingularValue(fd, lv.Get(i), indent+w.Indent); err != nil {
return err
}
comma = ","
}
if w.Indent != "" {
w.write("\n")
w.write(indent)
w.write(w.Indent)
}
w.write("]")
return nil
case fd.IsMap():
kfd := fd.MapKey()
vfd := fd.MapValue()
mv := v.Map()
// Collect a sorted list of all map keys and values.
type entry struct{ key, val protoreflect.Value }
var entries []entry
mv.Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
entries = append(entries, entry{k.Value(), v})
return true
})
sort.Slice(entries, func(i, j int) bool {
switch kfd.Kind() {
case protoreflect.BoolKind:
return !entries[i].key.Bool() && entries[j].key.Bool()
case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind, protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
return entries[i].key.Int() < entries[j].key.Int()
case protoreflect.Uint32Kind, protoreflect.Fixed32Kind, protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
return entries[i].key.Uint() < entries[j].key.Uint()
case protoreflect.StringKind:
return entries[i].key.String() < entries[j].key.String()
default:
panic("invalid kind")
}
})
w.write(`{`)
comma := ""
for _, entry := range entries {
w.write(comma)
if w.Indent != "" {
w.write("\n")
w.write(indent)
w.write(w.Indent)
w.write(w.Indent)
}
s := fmt.Sprint(entry.key.Interface())
b, err := json.Marshal(s)
if err != nil {
return err
}
w.write(string(b))
w.write(`:`)
if w.Indent != "" {
w.write(` `)
}
if err := w.marshalSingularValue(vfd, entry.val, indent+w.Indent); err != nil {
return err
}
comma = ","
}
if w.Indent != "" {
w.write("\n")
w.write(indent)
w.write(w.Indent)
}
w.write(`}`)
return nil
default:
return w.marshalSingularValue(fd, v, indent)
}
}
func (w *jsonWriter) marshalSingularValue(fd protoreflect.FieldDescriptor, v protoreflect.Value, indent string) error {
switch {
case !v.IsValid():
w.write("null")
return nil
case fd.Message() != nil:
return w.marshalMessage(v.Message(), indent+w.Indent, "")
case fd.Enum() != nil:
if fd.Enum().FullName() == "google.protobuf.NullValue" {
w.write("null")
return nil
}
vd := fd.Enum().Values().ByNumber(v.Enum())
if vd == nil || w.EnumsAsInts {
w.write(strconv.Itoa(int(v.Enum())))
} else {
w.write(`"` + string(vd.Name()) + `"`)
}
return nil
default:
switch v.Interface().(type) {
case float32, float64:
switch {
case math.IsInf(v.Float(), +1):
w.write(`"Infinity"`)
return nil
case math.IsInf(v.Float(), -1):
w.write(`"-Infinity"`)
return nil
case math.IsNaN(v.Float()):
w.write(`"NaN"`)
return nil
}
case int64, uint64:
w.write(fmt.Sprintf(`"%d"`, v.Interface()))
return nil
}
b, err := json.Marshal(v.Interface())
if err != nil {
return err
}
w.write(string(b))
return nil
}
}


@ -1,69 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package jsonpb provides functionality to marshal and unmarshal between a
// protocol buffer message and JSON. It follows the specification at
// https://developers.google.com/protocol-buffers/docs/proto3#json.
//
// Do not rely on the default behavior of the standard encoding/json package
// when called on generated message types as it does not operate correctly.
//
// Deprecated: Use the "google.golang.org/protobuf/encoding/protojson"
// package instead.
package jsonpb
import (
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoimpl"
)
// AnyResolver takes a type URL, present in an Any message,
// and resolves it into an instance of the associated message.
type AnyResolver interface {
Resolve(typeURL string) (proto.Message, error)
}
type anyResolver struct{ AnyResolver }
func (r anyResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
return r.FindMessageByURL(string(message))
}
func (r anyResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
m, err := r.Resolve(url)
if err != nil {
return nil, err
}
return protoimpl.X.MessageTypeOf(m), nil
}
func (r anyResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
return protoregistry.GlobalTypes.FindExtensionByName(field)
}
func (r anyResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
return protoregistry.GlobalTypes.FindExtensionByNumber(message, field)
}
func wellKnownType(s protoreflect.FullName) string {
if s.Parent() == "google.protobuf" {
switch s.Name() {
case "Empty", "Any",
"BoolValue", "BytesValue", "StringValue",
"Int32Value", "UInt32Value", "FloatValue",
"Int64Value", "UInt64Value", "DoubleValue",
"Duration", "Timestamp",
"NullValue", "Struct", "Value", "ListValue":
return string(s.Name())
}
}
return ""
}
func isMessageSet(md protoreflect.MessageDescriptor) bool {
ms, ok := md.(interface{ IsMessageSet() bool })
return ok && ms.IsMessageSet()
}
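With this vendored jsonpb package being dropped, a minimal sketch of the protojson replacement named in the package's deprecation notice; structpb stands in for an application message type.

package main

import (
	"fmt"

	"google.golang.org/protobuf/encoding/protojson"
	"google.golang.org/protobuf/types/known/structpb"
)

func main() {
	in, err := structpb.NewStruct(map[string]interface{}{"name": "demo", "count": 3})
	if err != nil {
		panic(err)
	}

	b, err := protojson.Marshal(in) // replaces jsonpb.Marshaler / MarshalToString
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))

	var out structpb.Struct
	if err := protojson.Unmarshal(b, &out); err != nil { // replaces jsonpb.Unmarshaler / UnmarshalString
		panic(err)
	}
}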


@ -1,179 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ptypes
import (
"fmt"
"strings"
"github.com/golang/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
anypb "github.com/golang/protobuf/ptypes/any"
)
const urlPrefix = "type.googleapis.com/"
// AnyMessageName returns the message name contained in an anypb.Any message.
// Most type assertions should use the Is function instead.
//
// Deprecated: Call the any.MessageName method instead.
func AnyMessageName(any *anypb.Any) (string, error) {
name, err := anyMessageName(any)
return string(name), err
}
func anyMessageName(any *anypb.Any) (protoreflect.FullName, error) {
if any == nil {
return "", fmt.Errorf("message is nil")
}
name := protoreflect.FullName(any.TypeUrl)
if i := strings.LastIndex(any.TypeUrl, "/"); i >= 0 {
name = name[i+len("/"):]
}
if !name.IsValid() {
return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl)
}
return name, nil
}
// MarshalAny marshals the given message m into an anypb.Any message.
//
// Deprecated: Call the anypb.New function instead.
func MarshalAny(m proto.Message) (*anypb.Any, error) {
switch dm := m.(type) {
case DynamicAny:
m = dm.Message
case *DynamicAny:
if dm == nil {
return nil, proto.ErrNil
}
m = dm.Message
}
b, err := proto.Marshal(m)
if err != nil {
return nil, err
}
return &anypb.Any{TypeUrl: urlPrefix + proto.MessageName(m), Value: b}, nil
}
// Empty returns a new message of the type specified in an anypb.Any message.
// It returns protoregistry.NotFound if the corresponding message type could not
// be resolved in the global registry.
//
// Deprecated: Use protoregistry.GlobalTypes.FindMessageByName instead
// to resolve the message name and create a new instance of it.
func Empty(any *anypb.Any) (proto.Message, error) {
name, err := anyMessageName(any)
if err != nil {
return nil, err
}
mt, err := protoregistry.GlobalTypes.FindMessageByName(name)
if err != nil {
return nil, err
}
return proto.MessageV1(mt.New().Interface()), nil
}
// UnmarshalAny unmarshals the encoded value contained in the anypb.Any message
// into the provided message m. It returns an error if the target message
// does not match the type in the Any message or if an unmarshal error occurs.
//
// The target message m may be a *DynamicAny message. If the underlying message
// type could not be resolved, then this returns protoregistry.NotFound.
//
// Deprecated: Call the any.UnmarshalTo method instead.
func UnmarshalAny(any *anypb.Any, m proto.Message) error {
if dm, ok := m.(*DynamicAny); ok {
if dm.Message == nil {
var err error
dm.Message, err = Empty(any)
if err != nil {
return err
}
}
m = dm.Message
}
anyName, err := AnyMessageName(any)
if err != nil {
return err
}
msgName := proto.MessageName(m)
if anyName != msgName {
return fmt.Errorf("mismatched message type: got %q want %q", anyName, msgName)
}
return proto.Unmarshal(any.Value, m)
}
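For reference, a small round-trip sketch (not part of this diff) using the deprecated helpers above; durationpb stands in for any concrete message type:

// Illustrative sketch: pack a message into an Any and unpack it again.
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	durationpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	src := ptypes.DurationProto(90 * time.Second)
	packed, err := ptypes.MarshalAny(src) // TypeUrl gains the type.googleapis.com/ prefix
	if err != nil {
		panic(err)
	}
	dst := &durationpb.Duration{}
	if err := ptypes.UnmarshalAny(packed, dst); err != nil {
		panic(err)
	}
	fmt.Println(dst.Seconds) // 90
}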
// Is reports whether the Any message contains a message of the specified type.
//
// Deprecated: Call the any.MessageIs method instead.
func Is(any *anypb.Any, m proto.Message) bool {
if any == nil || m == nil {
return false
}
name := proto.MessageName(m)
if !strings.HasSuffix(any.TypeUrl, name) {
return false
}
return len(any.TypeUrl) == len(name) || any.TypeUrl[len(any.TypeUrl)-len(name)-1] == '/'
}
// DynamicAny is a value that can be passed to UnmarshalAny to automatically
// allocate a proto.Message for the type specified in an anypb.Any message.
// The allocated message is stored in the embedded proto.Message.
//
// Example:
// var x ptypes.DynamicAny
// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... }
// fmt.Printf("unmarshaled message: %v", x.Message)
//
// Deprecated: Use the any.UnmarshalNew method instead to unmarshal
// the any message contents into a new instance of the underlying message.
type DynamicAny struct{ proto.Message }
func (m DynamicAny) String() string {
if m.Message == nil {
return "<nil>"
}
return m.Message.String()
}
func (m DynamicAny) Reset() {
if m.Message == nil {
return
}
m.Message.Reset()
}
func (m DynamicAny) ProtoMessage() {
return
}
func (m DynamicAny) ProtoReflect() protoreflect.Message {
if m.Message == nil {
return nil
}
return dynamicAny{proto.MessageReflect(m.Message)}
}
type dynamicAny struct{ protoreflect.Message }
func (m dynamicAny) Type() protoreflect.MessageType {
return dynamicAnyType{m.Message.Type()}
}
func (m dynamicAny) New() protoreflect.Message {
return dynamicAnyType{m.Message.Type()}.New()
}
func (m dynamicAny) Interface() protoreflect.ProtoMessage {
return DynamicAny{proto.MessageV1(m.Message.Interface())}
}
type dynamicAnyType struct{ protoreflect.MessageType }
func (t dynamicAnyType) New() protoreflect.Message {
return dynamicAny{t.MessageType.New()}
}
func (t dynamicAnyType) Zero() protoreflect.Message {
return dynamicAny{t.MessageType.Zero()}
}


@ -1,62 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/any/any.proto
package any
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
anypb "google.golang.org/protobuf/types/known/anypb"
reflect "reflect"
)
// Symbols defined in public import of google/protobuf/any.proto.
type Any = anypb.Any
var File_github_com_golang_protobuf_ptypes_any_any_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = []byte{
0x0a, 0x2f, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x61, 0x6e, 0x79, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74,
0x6f, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62,
0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x2b, 0x5a, 0x29,
0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e,
0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65,
0x73, 0x2f, 0x61, 0x6e, 0x79, 0x3b, 0x61, 0x6e, 0x79, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
}
var file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_golang_protobuf_ptypes_any_any_proto_init() }
func file_github_com_golang_protobuf_ptypes_any_any_proto_init() {
if File_github_com_golang_protobuf_ptypes_any_any_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_ptypes_any_any_proto = out.File
file_github_com_golang_protobuf_ptypes_any_any_proto_rawDesc = nil
file_github_com_golang_protobuf_ptypes_any_any_proto_goTypes = nil
file_github_com_golang_protobuf_ptypes_any_any_proto_depIdxs = nil
}


@ -1,10 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package ptypes provides functionality for interacting with well-known types.
//
// Deprecated: Well-known types have specialized functionality directly
// injected into the generated packages for each message type.
// See the deprecation notice for each function for the suggested alternative.
package ptypes


@ -1,76 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ptypes
import (
"errors"
"fmt"
"time"
durationpb "github.com/golang/protobuf/ptypes/duration"
)
// Range of google.protobuf.Duration as specified in duration.proto.
// This is about 10,000 years in seconds.
const (
maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60)
minSeconds = -maxSeconds
)
// Duration converts a durationpb.Duration to a time.Duration.
// Duration returns an error if dur is invalid or overflows a time.Duration.
//
// Deprecated: Call the dur.AsDuration and dur.CheckValid methods instead.
func Duration(dur *durationpb.Duration) (time.Duration, error) {
if err := validateDuration(dur); err != nil {
return 0, err
}
d := time.Duration(dur.Seconds) * time.Second
if int64(d/time.Second) != dur.Seconds {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
}
if dur.Nanos != 0 {
d += time.Duration(dur.Nanos) * time.Nanosecond
if (d < 0) != (dur.Nanos < 0) {
return 0, fmt.Errorf("duration: %v is out of range for time.Duration", dur)
}
}
return d, nil
}
// DurationProto converts a time.Duration to a durationpb.Duration.
//
// Deprecated: Call the durationpb.New function instead.
func DurationProto(d time.Duration) *durationpb.Duration {
nanos := d.Nanoseconds()
secs := nanos / 1e9
nanos -= secs * 1e9
return &durationpb.Duration{
Seconds: int64(secs),
Nanos: int32(nanos),
}
}
// validateDuration determines whether the durationpb.Duration is valid
// according to the definition in google/protobuf/duration.proto.
// A valid durationpb.Duration may still be too large to fit into a time.Duration.
// Note that the range of durationpb.Duration is about 10,000 years,
// while the range of time.Duration is about 290 years.
func validateDuration(dur *durationpb.Duration) error {
if dur == nil {
return errors.New("duration: nil Duration")
}
if dur.Seconds < minSeconds || dur.Seconds > maxSeconds {
return fmt.Errorf("duration: %v: seconds out of range", dur)
}
if dur.Nanos <= -1e9 || dur.Nanos >= 1e9 {
return fmt.Errorf("duration: %v: nanos out of range", dur)
}
// Seconds and Nanos must have the same sign, unless d.Nanos is zero.
if (dur.Seconds < 0 && dur.Nanos > 0) || (dur.Seconds > 0 && dur.Nanos < 0) {
return fmt.Errorf("duration: %v: seconds and nanos have different signs", dur)
}
return nil
}
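A quick sketch (not part of this diff) of how the two conversion helpers above behave, including the out-of-range case that validateDuration rejects:

// Illustrative sketch: convert between time.Duration and durationpb.Duration.
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
	durationpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	pb := ptypes.DurationProto(1500 * time.Millisecond)
	d, err := ptypes.Duration(pb)
	fmt.Println(d, err) // 1.5s <nil>

	// Far outside the ~10,000 year protobuf range: rejected with an error.
	_, err = ptypes.Duration(&durationpb.Duration{Seconds: 1 << 40})
	fmt.Println(err != nil) // true
}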


@ -1,63 +0,0 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: github.com/golang/protobuf/ptypes/duration/duration.proto
package duration
import (
protoreflect "google.golang.org/protobuf/reflect/protoreflect"
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
durationpb "google.golang.org/protobuf/types/known/durationpb"
reflect "reflect"
)
// Symbols defined in public import of google/protobuf/duration.proto.
type Duration = durationpb.Duration
var File_github_com_golang_protobuf_ptypes_duration_duration_proto protoreflect.FileDescriptor
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = []byte{
0x0a, 0x39, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c,
0x61, 0x6e, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79,
0x70, 0x65, 0x73, 0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x64, 0x75, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x75, 0x72,
0x61, 0x74, 0x69, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x42, 0x35, 0x5a, 0x33, 0x67,
0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67,
0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x70, 0x74, 0x79, 0x70, 0x65, 0x73,
0x2f, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x3b, 0x64, 0x75, 0x72, 0x61, 0x74, 0x69,
0x6f, 0x6e, 0x50, 0x00, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = []interface{}{}
var file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = []int32{
0, // [0:0] is the sub-list for method output_type
0, // [0:0] is the sub-list for method input_type
0, // [0:0] is the sub-list for extension type_name
0, // [0:0] is the sub-list for extension extendee
0, // [0:0] is the sub-list for field type_name
}
func init() { file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() }
func file_github_com_golang_protobuf_ptypes_duration_duration_proto_init() {
if File_github_com_golang_protobuf_ptypes_duration_duration_proto != nil {
return
}
type x struct{}
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc,
NumEnums: 0,
NumMessages: 0,
NumExtensions: 0,
NumServices: 0,
},
GoTypes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes,
DependencyIndexes: file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs,
}.Build()
File_github_com_golang_protobuf_ptypes_duration_duration_proto = out.File
file_github_com_golang_protobuf_ptypes_duration_duration_proto_rawDesc = nil
file_github_com_golang_protobuf_ptypes_duration_duration_proto_goTypes = nil
file_github_com_golang_protobuf_ptypes_duration_duration_proto_depIdxs = nil
}


@ -1,112 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package ptypes
import (
"errors"
"fmt"
"time"
timestamppb "github.com/golang/protobuf/ptypes/timestamp"
)
// Range of google.protobuf.Timestamp as specified in timestamp.proto.
const (
// Seconds field of the earliest valid Timestamp.
// This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
minValidSeconds = -62135596800
// Seconds field just after the latest valid Timestamp.
// This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix().
maxValidSeconds = 253402300800
)
// Timestamp converts a timestamppb.Timestamp to a time.Time.
// It returns an error if the argument is invalid.
//
// Unlike most Go functions, if Timestamp returns an error, the first return
// value is not the zero time.Time. Instead, it is the value obtained from the
// time.Unix function when passed the contents of the Timestamp, in the UTC
// locale. This may or may not be a meaningful time; many invalid Timestamps
// do map to valid time.Times.
//
// A nil Timestamp returns an error. The first return value in that case is
// undefined.
//
// Deprecated: Call the ts.AsTime and ts.CheckValid methods instead.
func Timestamp(ts *timestamppb.Timestamp) (time.Time, error) {
// Don't return the zero value on error, because it corresponds to a valid
// timestamp. Instead return whatever time.Unix gives us.
var t time.Time
if ts == nil {
t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp
} else {
t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC()
}
return t, validateTimestamp(ts)
}
// TimestampNow returns a google.protobuf.Timestamp for the current time.
//
// Deprecated: Call the timestamppb.Now function instead.
func TimestampNow() *timestamppb.Timestamp {
ts, err := TimestampProto(time.Now())
if err != nil {
panic("ptypes: time.Now() out of Timestamp range")
}
return ts
}
// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto.
// It returns an error if the resulting Timestamp is invalid.
//
// Deprecated: Call the timestamppb.New function instead.
func TimestampProto(t time.Time) (*timestamppb.Timestamp, error) {
ts := &timestamppb.Timestamp{
Seconds: t.Unix(),
Nanos: int32(t.Nanosecond()),
}
if err := validateTimestamp(ts); err != nil {
return nil, err
}
return ts, nil
}
// TimestampString returns the RFC 3339 string for valid Timestamps.
// For invalid Timestamps, it returns an error message in parentheses.
//
// Deprecated: Call the ts.AsTime method instead,
// followed by a call to the Format method on the time.Time value.
func TimestampString(ts *timestamppb.Timestamp) string {
t, err := Timestamp(ts)
if err != nil {
return fmt.Sprintf("(%v)", err)
}
return t.Format(time.RFC3339Nano)
}
// validateTimestamp determines whether a Timestamp is valid.
// A valid timestamp represents a time in the range [0001-01-01, 10000-01-01)
// and has a Nanos field in the range [0, 1e9).
//
// If the Timestamp is valid, validateTimestamp returns nil.
// Otherwise, it returns an error that describes the problem.
//
// Every valid Timestamp can be represented by a time.Time,
// but the converse is not true.
func validateTimestamp(ts *timestamppb.Timestamp) error {
if ts == nil {
return errors.New("timestamp: nil Timestamp")
}
if ts.Seconds < minValidSeconds {
return fmt.Errorf("timestamp: %v before 0001-01-01", ts)
}
if ts.Seconds >= maxValidSeconds {
return fmt.Errorf("timestamp: %v after 10000-01-01", ts)
}
if ts.Nanos < 0 || ts.Nanos >= 1e9 {
return fmt.Errorf("timestamp: %v: nanos not in range [0, 1e9)", ts)
}
return nil
}
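A quick sketch (not part of this diff) exercising the timestamp helpers defined above:

// Illustrative sketch: time.Time <-> timestamppb.Timestamp via ptypes.
package main

import (
	"fmt"
	"time"

	"github.com/golang/protobuf/ptypes"
)

func main() {
	ts, err := ptypes.TimestampProto(time.Date(2024, 5, 7, 0, 0, 0, 0, time.UTC))
	fmt.Println(ptypes.TimestampString(ts), err) // 2024-05-07T00:00:00Z <nil>

	// A nil Timestamp fails validation but still yields a usable time.Time.
	_, err = ptypes.Timestamp(nil)
	fmt.Println(err != nil) // true
}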


@ -1,5 +1,36 @@
# Changelog # Changelog
## [1.6.0](https://github.com/google/uuid/compare/v1.5.0...v1.6.0) (2024-01-16)
### Features
* add Max UUID constant ([#149](https://github.com/google/uuid/issues/149)) ([c58770e](https://github.com/google/uuid/commit/c58770eb495f55fe2ced6284f93c5158a62e53e3))
### Bug Fixes
* fix typo in version 7 uuid documentation ([#153](https://github.com/google/uuid/issues/153)) ([016b199](https://github.com/google/uuid/commit/016b199544692f745ffc8867b914129ecb47ef06))
* Monotonicity in UUIDv7 ([#150](https://github.com/google/uuid/issues/150)) ([a2b2b32](https://github.com/google/uuid/commit/a2b2b32373ff0b1a312b7fdf6d38a977099698a6))
## [1.5.0](https://github.com/google/uuid/compare/v1.4.0...v1.5.0) (2023-12-12)
### Features
* Validate UUID without creating new UUID ([#141](https://github.com/google/uuid/issues/141)) ([9ee7366](https://github.com/google/uuid/commit/9ee7366e66c9ad96bab89139418a713dc584ae29))
## [1.4.0](https://github.com/google/uuid/compare/v1.3.1...v1.4.0) (2023-10-26)
### Features
* UUIDs slice type with Strings() convenience method ([#133](https://github.com/google/uuid/issues/133)) ([cd5fbbd](https://github.com/google/uuid/commit/cd5fbbdd02f3e3467ac18940e07e062be1f864b4))
### Fixes
* Clarify that Parse's job is to parse but not necessarily validate strings. (Documents current behavior)
## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18) ## [1.3.1](https://github.com/google/uuid/compare/v1.3.0...v1.3.1) (2023-08-18)


@ -11,7 +11,7 @@ please explain why in the pull request description.
### Releasing ### Releasing
Commits that would precipitate a SemVer change, as desrcibed in the Conventional Commits that would precipitate a SemVer change, as described in the Conventional
Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action) Commits Specification, will trigger [`release-please`](https://github.com/google-github-actions/release-please-action)
to create a release candidate pull request. Once submitted, `release-please` to create a release candidate pull request. Once submitted, `release-please`
will create a release. will create a release.


@ -17,6 +17,12 @@ var (
NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8")) NameSpaceOID = Must(Parse("6ba7b812-9dad-11d1-80b4-00c04fd430c8"))
NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8")) NameSpaceX500 = Must(Parse("6ba7b814-9dad-11d1-80b4-00c04fd430c8"))
Nil UUID // empty UUID, all zeros Nil UUID // empty UUID, all zeros
// The Max UUID is special form of UUID that is specified to have all 128 bits set to 1.
Max = UUID{
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF,
}
) )
// NewHash returns a new UUID derived from the hash of space concatenated with // NewHash returns a new UUID derived from the hash of space concatenated with


@ -108,12 +108,23 @@ func setClockSequence(seq int) {
} }
// Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in // Time returns the time in 100s of nanoseconds since 15 Oct 1582 encoded in
// uuid. The time is only defined for version 1 and 2 UUIDs. // uuid. The time is only defined for version 1, 2, 6 and 7 UUIDs.
func (uuid UUID) Time() Time { func (uuid UUID) Time() Time {
time := int64(binary.BigEndian.Uint32(uuid[0:4])) var t Time
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32 switch uuid.Version() {
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48 case 6:
return Time(time) time := binary.BigEndian.Uint64(uuid[:8]) // Ignore uuid[6] version b0110
t = Time(time)
case 7:
time := binary.BigEndian.Uint64(uuid[:8])
t = Time((time>>16)*10000 + g1582ns100)
default: // forward compatible
time := int64(binary.BigEndian.Uint32(uuid[0:4]))
time |= int64(binary.BigEndian.Uint16(uuid[4:6])) << 32
time |= int64(binary.BigEndian.Uint16(uuid[6:8])&0xfff) << 48
t = Time(time)
}
return t
} }
// ClockSequence returns the clock sequence encoded in uuid. // ClockSequence returns the clock sequence encoded in uuid.


@ -56,11 +56,15 @@ func IsInvalidLengthError(err error) bool {
return ok return ok
} }
// Parse decodes s into a UUID or returns an error. Both the standard UUID // Parse decodes s into a UUID or returns an error if it cannot be parsed. Both
// forms of xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and // the standard UUID forms defined in RFC 4122
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx are decoded as well as the // (xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx and
// Microsoft encoding {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx} and the raw hex // urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx) are decoded. In addition,
// encoding: xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx. // Parse accepts non-standard strings such as the raw hex encoding
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx and 38 byte "Microsoft style" encodings,
// e.g. {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}. Only the middle 36 bytes are
// examined in the latter case. Parse should not be used to validate strings as
// it parses non-standard encodings as indicated above.
func Parse(s string) (UUID, error) { func Parse(s string) (UUID, error) {
var uuid UUID var uuid UUID
switch len(s) { switch len(s) {
@ -182,6 +186,59 @@ func Must(uuid UUID, err error) UUID {
return uuid return uuid
} }
// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
// xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
// {xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
// It returns an error if the format is invalid, otherwise nil.
func Validate(s string) error {
switch len(s) {
// Standard UUID format
case 36:
// UUID with "urn:uuid:" prefix
case 36 + 9:
if !strings.EqualFold(s[:9], "urn:uuid:") {
return fmt.Errorf("invalid urn prefix: %q", s[:9])
}
s = s[9:]
// UUID enclosed in braces
case 36 + 2:
if s[0] != '{' || s[len(s)-1] != '}' {
return fmt.Errorf("invalid bracketed UUID format")
}
s = s[1 : len(s)-1]
// UUID without hyphens
case 32:
for i := 0; i < len(s); i += 2 {
_, ok := xtob(s[i], s[i+1])
if !ok {
return errors.New("invalid UUID format")
}
}
default:
return invalidLengthError{len(s)}
}
// Check for standard UUID format
if len(s) == 36 {
if s[8] != '-' || s[13] != '-' || s[18] != '-' || s[23] != '-' {
return errors.New("invalid UUID format")
}
for _, x := range []int{0, 2, 4, 6, 9, 11, 14, 16, 19, 21, 24, 26, 28, 30, 32, 34} {
if _, ok := xtob(s[x], s[x+1]); !ok {
return errors.New("invalid UUID format")
}
}
}
return nil
}
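A short sketch (not part of this diff) contrasting the new Validate helper with Parse:

// Illustrative sketch: Validate checks the format without allocating a UUID.
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	s := "urn:uuid:6ba7b810-9dad-11d1-80b4-00c04fd430c8"
	fmt.Println(uuid.Validate(s) == nil) // true

	id, err := uuid.Parse(s) // Parse accepts the same four formats
	fmt.Println(id, err)     // 6ba7b810-9dad-11d1-80b4-00c04fd430c8 <nil>

	fmt.Println(uuid.Validate("not-a-uuid") != nil) // true
}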
// String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx // String returns the string form of uuid, xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
// , or "" if uuid is invalid. // , or "" if uuid is invalid.
func (uuid UUID) String() string { func (uuid UUID) String() string {
@ -294,3 +351,15 @@ func DisableRandPool() {
poolMu.Lock() poolMu.Lock()
poolPos = randPoolSize poolPos = randPoolSize
} }
// UUIDs is a slice of UUID types.
type UUIDs []UUID
// Strings returns a string slice containing the string form of each UUID in uuids.
func (uuids UUIDs) Strings() []string {
var uuidStrs = make([]string, len(uuids))
for i, uuid := range uuids {
uuidStrs[i] = uuid.String()
}
return uuidStrs
}
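And a one-liner sketch (not part of this diff) of the UUIDs convenience type:

// Illustrative sketch: render a batch of UUIDs as strings in one call.
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	ids := uuid.UUIDs{uuid.New(), uuid.New()}
	fmt.Println(ids.Strings()) // e.g. for logging or JSON encoding
}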

vendor/github.com/google/uuid/version6.go (new file, generated, vendored)

@ -0,0 +1,56 @@
// Copyright 2023 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import "encoding/binary"
// UUID version 6 is a field-compatible version of UUIDv1, reordered for improved DB locality.
// It is expected that UUIDv6 will primarily be used in contexts where there are existing v1 UUIDs.
// Systems that do not involve legacy UUIDv1 SHOULD consider using UUIDv7 instead.
//
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#uuidv6
//
// NewV6 returns a Version 6 UUID based on the current NodeID and clock
// sequence, and the current time. If the NodeID has not been set by SetNodeID
// or SetNodeInterface then it will be set automatically. If the NodeID cannot
// be set, NewV6 sets it to random bits automatically. If the clock sequence has
// not been set by SetClockSequence then it will be set automatically. If
// GetTime fails to return the current time, NewV6 returns Nil and an error.
func NewV6() (UUID, error) {
var uuid UUID
now, seq, err := GetTime()
if err != nil {
return uuid, err
}
/*
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| time_high |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| time_mid | time_low_and_version |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|clk_seq_hi_res | clk_seq_low | node (0-1) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| node (2-5) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
binary.BigEndian.PutUint64(uuid[0:], uint64(now))
binary.BigEndian.PutUint16(uuid[8:], seq)
uuid[6] = 0x60 | (uuid[6] & 0x0F)
uuid[8] = 0x80 | (uuid[8] & 0x3F)
nodeMu.Lock()
if nodeID == zeroID {
setNodeInterface("")
}
copy(uuid[10:], nodeID[:])
nodeMu.Unlock()
return uuid, nil
}
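A brief sketch (not part of this diff) of generating a v6 UUID and reading back its embedded timestamp through the version-aware Time accessor changed earlier in this diff:

// Illustrative sketch: a version 6 UUID carries its creation time.
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	id, err := uuid.NewV6()
	if err != nil {
		panic(err)
	}
	sec, nsec := id.Time().UnixTime() // Time() now understands the v6 layout
	fmt.Println(id.Version(), sec, nsec)
}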

vendor/github.com/google/uuid/version7.go (new file, generated, vendored)

@ -0,0 +1,104 @@
// Copyright 2023 Google Inc. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package uuid
import (
"io"
)
// UUID version 7 features a time-ordered value field derived from the widely
// implemented and well known Unix Epoch timestamp source,
// the number of milliseconds since midnight 1 Jan 1970 UTC, leap seconds excluded.
// It also has improved entropy characteristics over versions 1 or 6.
//
// see https://datatracker.ietf.org/doc/html/draft-peabody-dispatch-new-uuid-format-03#name-uuid-version-7
//
// Implementations SHOULD utilize UUID version 7 over UUID version 1 and 6 if possible.
//
// NewV7 returns a Version 7 UUID based on the current time (Unix Epoch).
// It uses the randomness pool if it was enabled with EnableRandPool.
// On error, NewV7 returns Nil and an error.
func NewV7() (UUID, error) {
uuid, err := NewRandom()
if err != nil {
return uuid, err
}
makeV7(uuid[:])
return uuid, nil
}
// NewV7FromReader returns a Version 7 UUID based on the current time (Unix Epoch).
// It uses NewRandomFromReader to fill the random bits.
// On error, NewV7FromReader returns Nil and an error.
func NewV7FromReader(r io.Reader) (UUID, error) {
uuid, err := NewRandomFromReader(r)
if err != nil {
return uuid, err
}
makeV7(uuid[:])
return uuid, nil
}
// makeV7 fills in the 48-bit time (uuid[0] - uuid[5]) and sets the version to b0111 (uuid[6]).
// uuid[8] already has the right variant bits set (the variant is 10).
// See NewV7 and NewV7FromReader.
func makeV7(uuid []byte) {
/*
0 1 2 3
0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| unix_ts_ms |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| unix_ts_ms | ver | rand_a (12 bit seq) |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
|var| rand_b |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
| rand_b |
+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
*/
_ = uuid[15] // bounds check
t, s := getV7Time()
uuid[0] = byte(t >> 40)
uuid[1] = byte(t >> 32)
uuid[2] = byte(t >> 24)
uuid[3] = byte(t >> 16)
uuid[4] = byte(t >> 8)
uuid[5] = byte(t)
uuid[6] = 0x70 | (0x0F & byte(s>>8))
uuid[7] = byte(s)
}
// lastV7time is the last time value we returned, stored as:
//
// 52 bits of time in milliseconds since epoch
// 12 bits of (fractional nanoseconds) >> 8
var lastV7time int64
const nanoPerMilli = 1000000
// getV7Time returns the time in milliseconds and nanoseconds / 256.
// The returned (milli << 12 + seq) is guaranteed to be greater than
// (milli << 12 + seq) returned by any previous call to getV7Time.
func getV7Time() (milli, seq int64) {
timeMu.Lock()
defer timeMu.Unlock()
nano := timeNow().UnixNano()
milli = nano / nanoPerMilli
// Sequence number is between 0 and 3906 (nanoPerMilli>>8)
seq = (nano - milli*nanoPerMilli) >> 8
now := milli<<12 + seq
if now <= lastV7time {
now = lastV7time + 1
milli = now >> 12
seq = now & 0xfff
}
lastV7time = now
return milli, seq
}
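A brief sketch (not part of this diff) showing the monotonicity that getV7Time provides for back-to-back v7 UUIDs:

// Illustrative sketch: consecutive v7 UUIDs sort in creation order.
package main

import (
	"fmt"

	"github.com/google/uuid"
)

func main() {
	a, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}
	b, err := uuid.NewV7()
	if err != nil {
		panic(err)
	}
	// The 48-bit timestamp plus 12-bit sequence strictly increases,
	// so the string forms compare in generation order.
	fmt.Println(a.String() < b.String()) // true
}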


@ -24,7 +24,7 @@ go_test(
embed = [":httprule"], embed = [":httprule"],
deps = [ deps = [
"//utilities", "//utilities",
"@com_github_golang_glog//:glog", "@org_golang_google_grpc//grpclog",
], ],
) )


@ -26,7 +26,7 @@ go_library(
deps = [ deps = [
"//internal/httprule", "//internal/httprule",
"//utilities", "//utilities",
"@go_googleapis//google/api:httpbody_go_proto", "@org_golang_google_genproto_googleapis_api//httpbody",
"@org_golang_google_grpc//codes", "@org_golang_google_grpc//codes",
"@org_golang_google_grpc//grpclog", "@org_golang_google_grpc//grpclog",
"@org_golang_google_grpc//health/grpc_health_v1", "@org_golang_google_grpc//health/grpc_health_v1",
@ -70,9 +70,9 @@ go_test(
"//utilities", "//utilities",
"@com_github_google_go_cmp//cmp", "@com_github_google_go_cmp//cmp",
"@com_github_google_go_cmp//cmp/cmpopts", "@com_github_google_go_cmp//cmp/cmpopts",
"@go_googleapis//google/api:httpbody_go_proto", "@org_golang_google_genproto_googleapis_api//httpbody",
"@go_googleapis//google/rpc:errdetails_go_proto", "@org_golang_google_genproto_googleapis_rpc//errdetails",
"@go_googleapis//google/rpc:status_go_proto", "@org_golang_google_genproto_googleapis_rpc//status",
"@org_golang_google_grpc//:go_default_library", "@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes", "@org_golang_google_grpc//codes",
"@org_golang_google_grpc//health/grpc_health_v1", "@org_golang_google_grpc//health/grpc_health_v1",


@ -137,7 +137,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
doForwardTrailers := requestAcceptsTrailers(r) doForwardTrailers := requestAcceptsTrailers(r)
if doForwardTrailers { if doForwardTrailers {
handleForwardResponseTrailerHeader(w, md) handleForwardResponseTrailerHeader(w, mux, md)
w.Header().Set("Transfer-Encoding", "chunked") w.Header().Set("Transfer-Encoding", "chunked")
} }
@ -152,7 +152,7 @@ func DefaultHTTPErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marsh
} }
if doForwardTrailers { if doForwardTrailers {
handleForwardResponseTrailer(w, md) handleForwardResponseTrailer(w, mux, md)
} }
} }


@ -27,7 +27,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
var root interface{} var root interface{}
if err := json.NewDecoder(r).Decode(&root); err != nil { if err := json.NewDecoder(r).Decode(&root); err != nil {
if err == io.EOF { if errors.Is(err, io.EOF) {
return fm, nil return fm, nil
} }
return nil, err return nil, err
@ -41,7 +41,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
m, ok := item.node.(map[string]interface{}) m, ok := item.node.(map[string]interface{})
switch { switch {
case ok: case ok && len(m) > 0:
// if the item is an object, then enqueue all of its children // if the item is an object, then enqueue all of its children
for k, v := range m { for k, v := range m {
if item.msg == nil { if item.msg == nil {
@ -96,6 +96,8 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
queue = append(queue, child) queue = append(queue, child)
} }
} }
case ok && len(m) == 0:
fallthrough
case len(item.path) > 0: case len(item.path) > 0:
// otherwise, it's a leaf node so print its path // otherwise, it's a leaf node so print its path
fm.Paths = append(fm.Paths, item.path) fm.Paths = append(fm.Paths, item.path)


@ -2,7 +2,7 @@ package runtime
import ( import (
"context" "context"
"fmt" "errors"
"io" "io"
"net/http" "net/http"
"net/textproto" "net/textproto"
@ -48,7 +48,7 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
var wroteHeader bool var wroteHeader bool
for { for {
resp, err := recv() resp, err := recv()
if err == io.EOF { if errors.Is(err, io.EOF) {
return return
} }
if err != nil { if err != nil {
@ -108,18 +108,20 @@ func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, m
} }
} }
func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) { func handleForwardResponseTrailerHeader(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
for k := range md.TrailerMD { for k := range md.TrailerMD {
tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)) if h, ok := mux.outgoingTrailerMatcher(k); ok {
w.Header().Add("Trailer", tKey) w.Header().Add("Trailer", textproto.CanonicalMIMEHeaderKey(h))
}
} }
} }
func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) { func handleForwardResponseTrailer(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
for k, vs := range md.TrailerMD { for k, vs := range md.TrailerMD {
tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k) if h, ok := mux.outgoingTrailerMatcher(k); ok {
for _, v := range vs { for _, v := range vs {
w.Header().Add(tKey, v) w.Header().Add(h, v)
}
} }
} }
} }
@ -147,12 +149,10 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
doForwardTrailers := requestAcceptsTrailers(req) doForwardTrailers := requestAcceptsTrailers(req)
if doForwardTrailers { if doForwardTrailers {
handleForwardResponseTrailerHeader(w, md) handleForwardResponseTrailerHeader(w, mux, md)
w.Header().Set("Transfer-Encoding", "chunked") w.Header().Set("Transfer-Encoding", "chunked")
} }
handleForwardResponseTrailerHeader(w, md)
contentType := marshaler.ContentType(resp) contentType := marshaler.ContentType(resp)
w.Header().Set("Content-Type", contentType) w.Header().Set("Content-Type", contentType)
@ -178,7 +178,7 @@ func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marsha
} }
if doForwardTrailers { if doForwardTrailers {
handleForwardResponseTrailer(w, md) handleForwardResponseTrailer(w, mux, md)
} }
} }


@ -26,7 +26,7 @@ func (h *HTTPBodyMarshaler) ContentType(v interface{}) string {
// google.api.HttpBody message, otherwise it falls back to the default Marshaler. // google.api.HttpBody message, otherwise it falls back to the default Marshaler.
func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) { func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
if httpBody, ok := v.(*httpbody.HttpBody); ok { if httpBody, ok := v.(*httpbody.HttpBody); ok {
return httpBody.Data, nil return httpBody.GetData(), nil
} }
return h.Marshaler.Marshal(v) return h.Marshaler.Marshal(v)
} }


@ -57,6 +57,7 @@ type ServeMux struct {
marshalers marshalerRegistry marshalers marshalerRegistry
incomingHeaderMatcher HeaderMatcherFunc incomingHeaderMatcher HeaderMatcherFunc
outgoingHeaderMatcher HeaderMatcherFunc outgoingHeaderMatcher HeaderMatcherFunc
outgoingTrailerMatcher HeaderMatcherFunc
metadataAnnotators []func(context.Context, *http.Request) metadata.MD metadataAnnotators []func(context.Context, *http.Request) metadata.MD
errorHandler ErrorHandlerFunc errorHandler ErrorHandlerFunc
streamErrorHandler StreamErrorHandlerFunc streamErrorHandler StreamErrorHandlerFunc
@ -114,10 +115,18 @@ func DefaultHeaderMatcher(key string) (string, bool) {
return "", false return "", false
} }
func defaultOutgoingHeaderMatcher(key string) (string, bool) {
return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
}
func defaultOutgoingTrailerMatcher(key string) (string, bool) {
return fmt.Sprintf("%s%s", MetadataTrailerPrefix, key), true
}
// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway. // WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming request to gateway.
// //
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be // This matcher will be called with each header in http.Request. If matcher returns true, that header will be
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header. // passed to gRPC context. To transform the header before passing to gRPC context, matcher should return the modified header.
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
for _, header := range fn.matchedMalformedHeaders() { for _, header := range fn.matchedMalformedHeaders() {
grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header) grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header)
@ -147,13 +156,24 @@ func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
// //
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be // This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
// passed to http response returned from gateway. To transform the header before passing to response, // passed to http response returned from gateway. To transform the header before passing to response,
// matcher should return modified header. // matcher should return the modified header.
func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption { func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
return func(mux *ServeMux) { return func(mux *ServeMux) {
mux.outgoingHeaderMatcher = fn mux.outgoingHeaderMatcher = fn
} }
} }
// WithOutgoingTrailerMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
//
// This matcher will be called with each header in response trailer metadata. If matcher returns true, that header will be
// passed to http response returned from gateway. To transform the header before passing to response,
// matcher should return the modified header.
func WithOutgoingTrailerMatcher(fn HeaderMatcherFunc) ServeMuxOption {
return func(mux *ServeMux) {
mux.outgoingTrailerMatcher = fn
}
}
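For illustration (not part of this diff), a sketch of using the new option to control trailer forwarding; the pass-through behaviour and the v2 module path are assumptions of the example, not something this hunk prescribes:

// Illustrative sketch: forward every gRPC trailer under its original name.
package main

import (
	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
)

func main() {
	mux := runtime.NewServeMux(
		runtime.WithOutgoingTrailerMatcher(func(key string) (string, bool) {
			return key, true // keep the trailer, without the default prefix
		}),
	)
	_ = mux // register generated handlers and serve as usual
}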
// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context. // WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
// //
// This can be used by services that need to read from http.Request and modify gRPC context. A common use case // This can be used by services that need to read from http.Request and modify gRPC context. A common use case
@ -273,11 +293,11 @@ func NewServeMux(opts ...ServeMuxOption) *ServeMux {
if serveMux.incomingHeaderMatcher == nil { if serveMux.incomingHeaderMatcher == nil {
serveMux.incomingHeaderMatcher = DefaultHeaderMatcher serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
} }
if serveMux.outgoingHeaderMatcher == nil { if serveMux.outgoingHeaderMatcher == nil {
serveMux.outgoingHeaderMatcher = func(key string) (string, bool) { serveMux.outgoingHeaderMatcher = defaultOutgoingHeaderMatcher
return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true }
} if serveMux.outgoingTrailerMatcher == nil {
serveMux.outgoingTrailerMatcher = defaultOutgoingTrailerMatcher
} }
return serveMux return serveMux
@ -321,13 +341,13 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
} }
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) { if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
r.Method = strings.ToUpper(override)
if err := r.ParseForm(); err != nil { if err := r.ParseForm(); err != nil {
_, outboundMarshaler := MarshalerForRequest(s, r) _, outboundMarshaler := MarshalerForRequest(s, r)
sterr := status.Error(codes.InvalidArgument, err.Error()) sterr := status.Error(codes.InvalidArgument, err.Error())
s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr) s.errorHandler(ctx, s, outboundMarshaler, w, r, sterr)
return return
} }
r.Method = strings.ToUpper(override)
} }
var pathComponents []string var pathComponents []string


@ -51,11 +51,13 @@ func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *u
key = match[1] key = match[1]
values = append([]string{match[2]}, values...) values = append([]string{match[2]}, values...)
} }
fieldPath := strings.Split(key, ".")
msgValue := msg.ProtoReflect()
fieldPath := normalizeFieldPath(msgValue, strings.Split(key, "."))
if filter.HasCommonPrefix(fieldPath) { if filter.HasCommonPrefix(fieldPath) {
continue continue
} }
if err := populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, values); err != nil { if err := populateFieldValueFromPath(msgValue, fieldPath, values); err != nil {
return err return err
} }
} }
@ -68,6 +70,38 @@ func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value stri
return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value}) return populateFieldValueFromPath(msg.ProtoReflect(), fieldPath, []string{value})
} }
func normalizeFieldPath(msgValue protoreflect.Message, fieldPath []string) []string {
newFieldPath := make([]string, 0, len(fieldPath))
for i, fieldName := range fieldPath {
fields := msgValue.Descriptor().Fields()
fieldDesc := fields.ByTextName(fieldName)
if fieldDesc == nil {
fieldDesc = fields.ByJSONName(fieldName)
}
if fieldDesc == nil {
// return initial field path values if no matching message field was found
return fieldPath
}
newFieldPath = append(newFieldPath, string(fieldDesc.Name()))
// If this is the last element, we're done
if i == len(fieldPath)-1 {
break
}
// Only singular message fields are allowed
if fieldDesc.Message() == nil || fieldDesc.Cardinality() == protoreflect.Repeated {
return fieldPath
}
// Get the nested message
msgValue = msgValue.Get(fieldDesc).Message()
}
return newFieldPath
}
func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error { func populateFieldValueFromPath(msgValue protoreflect.Message, fieldPath []string, values []string) error {
if len(fieldPath) < 1 { if len(fieldPath) < 1 {
return errors.New("no field path") return errors.New("no field path")


@ -28,6 +28,8 @@ var (
uint32Type = reflect.TypeOf(uint32(1)) uint32Type = reflect.TypeOf(uint32(1))
uint64Type = reflect.TypeOf(uint64(1)) uint64Type = reflect.TypeOf(uint64(1))
uintptrType = reflect.TypeOf(uintptr(1))
float32Type = reflect.TypeOf(float32(1)) float32Type = reflect.TypeOf(float32(1))
float64Type = reflect.TypeOf(float64(1)) float64Type = reflect.TypeOf(float64(1))
@ -308,11 +310,11 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
case reflect.Struct: case reflect.Struct:
{ {
// All structs enter here. We're not interested in most types. // All structs enter here. We're not interested in most types.
if !canConvert(obj1Value, timeType) { if !obj1Value.CanConvert(timeType) {
break break
} }
// time.Time can compared! // time.Time can be compared!
timeObj1, ok := obj1.(time.Time) timeObj1, ok := obj1.(time.Time)
if !ok { if !ok {
timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time) timeObj1 = obj1Value.Convert(timeType).Interface().(time.Time)
@ -328,7 +330,7 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
case reflect.Slice: case reflect.Slice:
{ {
// We only care about the []byte type. // We only care about the []byte type.
if !canConvert(obj1Value, bytesType) { if !obj1Value.CanConvert(bytesType) {
break break
} }
@ -345,6 +347,26 @@ func compare(obj1, obj2 interface{}, kind reflect.Kind) (CompareType, bool) {
return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true return CompareType(bytes.Compare(bytesObj1, bytesObj2)), true
} }
case reflect.Uintptr:
{
uintptrObj1, ok := obj1.(uintptr)
if !ok {
uintptrObj1 = obj1Value.Convert(uintptrType).Interface().(uintptr)
}
uintptrObj2, ok := obj2.(uintptr)
if !ok {
uintptrObj2 = obj2Value.Convert(uintptrType).Interface().(uintptr)
}
if uintptrObj1 > uintptrObj2 {
return compareGreater, true
}
if uintptrObj1 == uintptrObj2 {
return compareEqual, true
}
if uintptrObj1 < uintptrObj2 {
return compareLess, true
}
}
} }
return compareEqual, false return compareEqual, false
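With the new reflect.Uintptr case above, the ordering assertions accept uintptr operands; a sketch (not part of this diff, intended for a _test.go file):

// Illustrative sketch: ordering assertions over uintptr values.
package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestUintptrOrdering(t *testing.T) {
	assert.Greater(t, uintptr(2), uintptr(1))
	assert.LessOrEqual(t, uintptr(1), uintptr(1))
}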


@ -1,16 +0,0 @@
//go:build go1.17
// +build go1.17
// TODO: once support for Go 1.16 is dropped, this file can be
// merged/removed with assertion_compare_go1.17_test.go and
// assertion_compare_legacy.go
package assert
import "reflect"
// Wrapper around reflect.Value.CanConvert, for compatibility
// reasons.
func canConvert(value reflect.Value, to reflect.Type) bool {
return value.CanConvert(to)
}


@ -1,16 +0,0 @@
//go:build !go1.17
// +build !go1.17
// TODO: once support for Go 1.16 is dropped, this file can be
// merged/removed with assertion_compare_go1.17_test.go and
// assertion_compare_can_convert.go
package assert
import "reflect"
// Older versions of Go do not have the reflect.Value.CanConvert
// method.
func canConvert(value reflect.Value, to reflect.Type) bool {
return false
}


@ -1,7 +1,4 @@
/* // Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
package assert package assert
@ -107,7 +104,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{},
return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...) return EqualExportedValues(t, expected, actual, append([]interface{}{msg}, args...)...)
} }
// EqualValuesf asserts that two objects are equal or convertable to the same types // EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal. // and equal.
// //
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
@ -616,6 +613,16 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf
return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...) return NotErrorIs(t, err, target, append([]interface{}{msg}, args...)...)
} }
// NotImplementsf asserts that an object does not implement the specified interface.
//
// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
return NotImplements(t, interfaceObject, object, append([]interface{}{msg}, args...)...)
}
// NotNilf asserts that the specified object is not nil. // NotNilf asserts that the specified object is not nil.
// //
// assert.NotNilf(t, err, "error message %s", "formatted") // assert.NotNilf(t, err, "error message %s", "formatted")
@ -660,10 +667,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...) return NotSame(t, expected, actual, append([]interface{}{msg}, args...)...)
} }
// NotSubsetf asserts that the specified list(array, slice...) contains not all // NotSubsetf asserts that the specified list(array, slice...) or map does NOT
// elements given in the specified subset(array, slice...). // contain all elements given in the specified subset list(array, slice...) or
// map.
// //
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -747,10 +756,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
return Same(t, expected, actual, append([]interface{}{msg}, args...)...) return Same(t, expected, actual, append([]interface{}{msg}, args...)...)
} }
// Subsetf asserts that the specified list(array, slice...) contains all // Subsetf asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset(array, slice...). // elements given in the specified subset list(array, slice...) or map.
// //
// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") // assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool { func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()


@ -1,7 +1,4 @@
/* // Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
package assert package assert
@ -189,7 +186,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface
return EqualExportedValuesf(a.t, expected, actual, msg, args...) return EqualExportedValuesf(a.t, expected, actual, msg, args...)
} }
// EqualValues asserts that two objects are equal or convertable to the same types // EqualValues asserts that two objects are equal or convertible to the same types
// and equal. // and equal.
// //
// a.EqualValues(uint32(123), int32(123)) // a.EqualValues(uint32(123), int32(123))
@ -200,7 +197,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
return EqualValues(a.t, expected, actual, msgAndArgs...) return EqualValues(a.t, expected, actual, msgAndArgs...)
} }
// EqualValuesf asserts that two objects are equal or convertable to the same types // EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal. // and equal.
// //
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
@ -1221,6 +1218,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in
return NotErrorIsf(a.t, err, target, msg, args...) return NotErrorIsf(a.t, err, target, msg, args...)
} }
// NotImplements asserts that an object does not implement the specified interface.
//
// a.NotImplements((*MyInterface)(nil), new(MyObject))
func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NotImplements(a.t, interfaceObject, object, msgAndArgs...)
}
// NotImplementsf asserts that an object does not implement the specified interface.
//
// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
return NotImplementsf(a.t, interfaceObject, object, msg, args...)
}
// NotNil asserts that the specified object is not nil. // NotNil asserts that the specified object is not nil.
// //
// a.NotNil(err) // a.NotNil(err)
@ -1309,10 +1326,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri
return NotSamef(a.t, expected, actual, msg, args...) return NotSamef(a.t, expected, actual, msg, args...)
} }
// NotSubset asserts that the specified list(array, slice...) contains not all // NotSubset asserts that the specified list(array, slice...) or map does NOT
// elements given in the specified subset(array, slice...). // contain all elements given in the specified subset list(array, slice...) or
// map.
// //
// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") // a.NotSubset([1, 3, 4], [1, 2])
// a.NotSubset({"x": 1, "y": 2}, {"z": 3})
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1320,10 +1339,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs
return NotSubset(a.t, list, subset, msgAndArgs...) return NotSubset(a.t, list, subset, msgAndArgs...)
} }
// NotSubsetf asserts that the specified list(array, slice...) contains not all // NotSubsetf asserts that the specified list(array, slice...) or map does NOT
// elements given in the specified subset(array, slice...). // contain all elements given in the specified subset list(array, slice...) or
// map.
// //
// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1483,10 +1504,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
return Samef(a.t, expected, actual, msg, args...) return Samef(a.t, expected, actual, msg, args...)
} }
// Subset asserts that the specified list(array, slice...) contains all // Subset asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset(array, slice...). // elements given in the specified subset list(array, slice...) or map.
// //
// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") // a.Subset([1, 2, 3], [1, 2])
// a.Subset({"x": 1, "y": 2}, {"x": 1})
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool { func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1494,10 +1516,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
return Subset(a.t, list, subset, msgAndArgs...) return Subset(a.t, list, subset, msgAndArgs...)
} }
// Subsetf asserts that the specified list(array, slice...) contains all // Subsetf asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset(array, slice...). // elements given in the specified subset list(array, slice...) or map.
// //
// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool { func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) bool {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()


@ -19,7 +19,7 @@ import (
"github.com/davecgh/go-spew/spew" "github.com/davecgh/go-spew/spew"
"github.com/pmezard/go-difflib/difflib" "github.com/pmezard/go-difflib/difflib"
yaml "gopkg.in/yaml.v3" "gopkg.in/yaml.v3"
) )
//go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl" //go:generate sh -c "cd ../_codegen && go build && cd - && ../_codegen/_codegen -output-package=assert -template=assertion_format.go.tmpl"
@ -110,7 +110,12 @@ func copyExportedFields(expected interface{}) interface{} {
return result.Interface() return result.Interface()
case reflect.Array, reflect.Slice: case reflect.Array, reflect.Slice:
result := reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len()) var result reflect.Value
if expectedKind == reflect.Array {
result = reflect.New(reflect.ArrayOf(expectedValue.Len(), expectedType.Elem())).Elem()
} else {
result = reflect.MakeSlice(expectedType, expectedValue.Len(), expectedValue.Len())
}
for i := 0; i < expectedValue.Len(); i++ { for i := 0; i < expectedValue.Len(); i++ {
index := expectedValue.Index(i) index := expectedValue.Index(i)
if isNil(index) { if isNil(index) {
@ -140,6 +145,8 @@ func copyExportedFields(expected interface{}) interface{} {
// structures. // structures.
// //
// This function does no assertion of any kind. // This function does no assertion of any kind.
//
// Deprecated: Use [EqualExportedValues] instead.
func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool { func ObjectsExportedFieldsAreEqual(expected, actual interface{}) bool {
expectedCleaned := copyExportedFields(expected) expectedCleaned := copyExportedFields(expected)
actualCleaned := copyExportedFields(actual) actualCleaned := copyExportedFields(actual)
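
Since ObjectsExportedFieldsAreEqual is now flagged as deprecated in favor of EqualExportedValues, a caller that still uses it would migrate roughly as in the sketch below. This is a minimal, hedged illustration: the payload type and test name are invented, and the assumed behavior is the one described by the doc comments in this diff (unexported fields are ignored).

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type payload struct {
	Name string // exported: compared
	note string // unexported: ignored
}

func TestExportedFieldsOnly(t *testing.T) {
	a := payload{Name: "x", note: "local"}
	b := payload{Name: "x", note: "other"}
	// Replaces a deprecated ObjectsExportedFieldsAreEqual(a, b) check.
	assert.EqualExportedValues(t, a, b)
}
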
@ -153,17 +160,40 @@ func ObjectsAreEqualValues(expected, actual interface{}) bool {
return true return true
} }
actualType := reflect.TypeOf(actual) expectedValue := reflect.ValueOf(expected)
if actualType == nil { actualValue := reflect.ValueOf(actual)
if !expectedValue.IsValid() || !actualValue.IsValid() {
return false return false
} }
expectedValue := reflect.ValueOf(expected)
if expectedValue.IsValid() && expectedValue.Type().ConvertibleTo(actualType) { expectedType := expectedValue.Type()
// Attempt comparison after type conversion actualType := actualValue.Type()
return reflect.DeepEqual(expectedValue.Convert(actualType).Interface(), actual) if !expectedType.ConvertibleTo(actualType) {
return false
} }
return false if !isNumericType(expectedType) || !isNumericType(actualType) {
// Attempt comparison after type conversion
return reflect.DeepEqual(
expectedValue.Convert(actualType).Interface(), actual,
)
}
// If BOTH values are numeric, there are chances of false positives due
// to overflow or underflow. So, we need to make sure to always convert
// the smaller type to a larger type before comparing.
if expectedType.Size() >= actualType.Size() {
return actualValue.Convert(expectedType).Interface() == expected
}
return expectedValue.Convert(actualType).Interface() == actual
}
// isNumericType returns true if the type is one of:
// int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64,
// float32, float64, complex64, complex128
func isNumericType(t reflect.Type) bool {
return t.Kind() >= reflect.Int && t.Kind() <= reflect.Complex128
} }
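
The new numeric branch exists to avoid false positives from narrowing conversions: converting the larger value down to the smaller type can wrap and collide. A hedged sketch of the case this guards against (test name invented; ObjectsAreEqualValues is the exported helper changed above):

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// TestNumericEqualValues shows the overflow case the size-aware comparison
// guards against: int64(300) narrowed to int8 wraps to 44, which previously
// made these two values look equal.
func TestNumericEqualValues(t *testing.T) {
	assert.False(t, assert.ObjectsAreEqualValues(int64(300), int8(44)))
	// Values that really are the same number still compare equal across types.
	assert.True(t, assert.ObjectsAreEqualValues(uint8(7), int64(7)))
}
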
/* CallerInfo is necessary because the assert functions use the testing object /* CallerInfo is necessary because the assert functions use the testing object
@ -266,7 +296,7 @@ func messageFromMsgAndArgs(msgAndArgs ...interface{}) string {
// Aligns the provided message so that all lines after the first line start at the same location as the first line. // Aligns the provided message so that all lines after the first line start at the same location as the first line.
// Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab). // Assumes that the first line starts at the correct location (after carriage return, tab, label, spacer and tab).
// The longestLabelLen parameter specifies the length of the longest label in the output (required becaues this is the // The longestLabelLen parameter specifies the length of the longest label in the output (required because this is the
// basis on which the alignment occurs). // basis on which the alignment occurs).
func indentMessageLines(message string, longestLabelLen int) string { func indentMessageLines(message string, longestLabelLen int) string {
outBuf := new(bytes.Buffer) outBuf := new(bytes.Buffer)
@ -382,6 +412,25 @@ func Implements(t TestingT, interfaceObject interface{}, object interface{}, msg
return true return true
} }
// NotImplements asserts that an object does not implement the specified interface.
//
// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject))
func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok {
h.Helper()
}
interfaceType := reflect.TypeOf(interfaceObject).Elem()
if object == nil {
return Fail(t, fmt.Sprintf("Cannot check if nil does not implement %v", interfaceType), msgAndArgs...)
}
if reflect.TypeOf(object).Implements(interfaceType) {
return Fail(t, fmt.Sprintf("%T implements %v", object, interfaceType), msgAndArgs...)
}
return true
}
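
The newly added NotImplements assertion is the negative counterpart of Implements. A small usage sketch (the silent type is invented for illustration):

package example

import (
	"io"
	"testing"

	"github.com/stretchr/testify/assert"
)

// silent deliberately has no Read method.
type silent struct{}

func TestSilentIsNotAReader(t *testing.T) {
	// Passes: silent does not satisfy io.Reader. Passing a nil object would
	// instead fail with "Cannot check if nil does not implement ...".
	assert.NotImplements(t, (*io.Reader)(nil), silent{})
}
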
// IsType asserts that the specified objects are of the same type. // IsType asserts that the specified objects are of the same type.
func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool { func IsType(t TestingT, expectedType interface{}, object interface{}, msgAndArgs ...interface{}) bool {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
@ -496,7 +545,7 @@ func samePointers(first, second interface{}) bool {
// representations appropriate to be presented to the user. // representations appropriate to be presented to the user.
// //
// If the values are not of like type, the returned strings will be prefixed // If the values are not of like type, the returned strings will be prefixed
// with the type name, and the value will be enclosed in parenthesis similar // with the type name, and the value will be enclosed in parentheses similar
// to a type conversion in the Go grammar. // to a type conversion in the Go grammar.
func formatUnequalValues(expected, actual interface{}) (e string, a string) { func formatUnequalValues(expected, actual interface{}) (e string, a string) {
if reflect.TypeOf(expected) != reflect.TypeOf(actual) { if reflect.TypeOf(expected) != reflect.TypeOf(actual) {
@ -523,7 +572,7 @@ func truncatingFormat(data interface{}) string {
return value return value
} }
// EqualValues asserts that two objects are equal or convertable to the same types // EqualValues asserts that two objects are equal or convertible to the same types
// and equal. // and equal.
// //
// assert.EqualValues(t, uint32(123), int32(123)) // assert.EqualValues(t, uint32(123), int32(123))
@ -566,12 +615,19 @@ func EqualExportedValues(t TestingT, expected, actual interface{}, msgAndArgs ..
return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...) return Fail(t, fmt.Sprintf("Types expected to match exactly\n\t%v != %v", aType, bType), msgAndArgs...)
} }
if aType.Kind() == reflect.Ptr {
aType = aType.Elem()
}
if bType.Kind() == reflect.Ptr {
bType = bType.Elem()
}
if aType.Kind() != reflect.Struct { if aType.Kind() != reflect.Struct {
return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...) return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", aType.Kind(), reflect.Struct), msgAndArgs...)
} }
if bType.Kind() != reflect.Struct { if bType.Kind() != reflect.Struct {
return Fail(t, fmt.Sprintf("Types expected to both be struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...) return Fail(t, fmt.Sprintf("Types expected to both be struct or pointer to struct \n\t%v != %v", bType.Kind(), reflect.Struct), msgAndArgs...)
} }
expected = copyExportedFields(expected) expected = copyExportedFields(expected)
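
With the pointer handling above, EqualExportedValues now accepts pointers to structs as well as plain structs, still ignoring unexported fields. A hedged sketch under that assumption (the config type is invented):

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

type config struct {
	Host string // exported: compared
	seen bool   // unexported: ignored
}

func TestEqualExportedValuesWithPointers(t *testing.T) {
	// Both arguments may now be pointers to structs; unexported fields still
	// do not participate in the comparison.
	assert.EqualExportedValues(t, &config{Host: "a"}, &config{Host: "a", seen: true})
}
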
@ -620,17 +676,6 @@ func NotNil(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
return Fail(t, "Expected value not to be nil.", msgAndArgs...) return Fail(t, "Expected value not to be nil.", msgAndArgs...)
} }
// containsKind checks if a specified kind in the slice of kinds.
func containsKind(kinds []reflect.Kind, kind reflect.Kind) bool {
for i := 0; i < len(kinds); i++ {
if kind == kinds[i] {
return true
}
}
return false
}
// isNil checks if a specified object is nil or not, without Failing. // isNil checks if a specified object is nil or not, without Failing.
func isNil(object interface{}) bool { func isNil(object interface{}) bool {
if object == nil { if object == nil {
@ -638,16 +683,13 @@ func isNil(object interface{}) bool {
} }
value := reflect.ValueOf(object) value := reflect.ValueOf(object)
kind := value.Kind() switch value.Kind() {
isNilableKind := containsKind( case
[]reflect.Kind{ reflect.Chan, reflect.Func,
reflect.Chan, reflect.Func, reflect.Interface, reflect.Map,
reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
reflect.Ptr, reflect.Slice, reflect.UnsafePointer},
kind)
if isNilableKind && value.IsNil() { return value.IsNil()
return true
} }
return false return false
@ -731,16 +773,14 @@ func NotEmpty(t TestingT, object interface{}, msgAndArgs ...interface{}) bool {
} }
// getLen try to get length of object. // getLen tries to get the length of an object.
// return (false, 0) if impossible. // It returns (0, false) if impossible.
func getLen(x interface{}) (ok bool, length int) { func getLen(x interface{}) (length int, ok bool) {
v := reflect.ValueOf(x) v := reflect.ValueOf(x)
defer func() { defer func() {
if e := recover(); e != nil { ok = recover() == nil
ok = false
}
}() }()
return true, v.Len() return v.Len(), true
} }
// Len asserts that the specified object has specific length. // Len asserts that the specified object has specific length.
@ -751,13 +791,13 @@ func Len(t TestingT, object interface{}, length int, msgAndArgs ...interface{})
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
ok, l := getLen(object) l, ok := getLen(object)
if !ok { if !ok {
return Fail(t, fmt.Sprintf("\"%s\" could not be applied builtin len()", object), msgAndArgs...) return Fail(t, fmt.Sprintf("\"%v\" could not be applied builtin len()", object), msgAndArgs...)
} }
if l != length { if l != length {
return Fail(t, fmt.Sprintf("\"%s\" should have %d item(s), but has %d", object, length, l), msgAndArgs...) return Fail(t, fmt.Sprintf("\"%v\" should have %d item(s), but has %d", object, length, l), msgAndArgs...)
} }
return true return true
} }
@ -919,10 +959,11 @@ func NotContains(t TestingT, s, contains interface{}, msgAndArgs ...interface{})
} }
// Subset asserts that the specified list(array, slice...) contains all // Subset asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset(array, slice...). // elements given in the specified subset list(array, slice...) or map.
// //
// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") // assert.Subset(t, [1, 2, 3], [1, 2])
// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -975,10 +1016,12 @@ func Subset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok
return true return true
} }
// NotSubset asserts that the specified list(array, slice...) contains not all // NotSubset asserts that the specified list(array, slice...) or map does NOT
// elements given in the specified subset(array, slice...). // contain all elements given in the specified subset list(array, slice...) or
// map.
// //
// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") // assert.NotSubset(t, [1, 3, 4], [1, 2])
// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) { func NotSubset(t TestingT, list, subset interface{}, msgAndArgs ...interface{}) (ok bool) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
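
Subset and NotSubset now accept maps in addition to arrays and slices; for maps, every key/value pair of the subset must appear in the containing map. A short usage sketch based on the updated doc comments:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestSubsetWithMaps(t *testing.T) {
	m := map[string]int{"x": 1, "y": 2}
	// For maps, every key/value pair of the subset must be present in m.
	assert.Subset(t, m, map[string]int{"x": 1})
	// NotSubset passes because "z" is missing from m.
	assert.NotSubset(t, m, map[string]int{"z": 3})
}
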
@ -1439,7 +1482,7 @@ func InEpsilon(t TestingT, expected, actual interface{}, epsilon float64, msgAnd
h.Helper() h.Helper()
} }
if math.IsNaN(epsilon) { if math.IsNaN(epsilon) {
return Fail(t, "epsilon must not be NaN") return Fail(t, "epsilon must not be NaN", msgAndArgs...)
} }
actualEpsilon, err := calcRelativeError(expected, actual) actualEpsilon, err := calcRelativeError(expected, actual)
if err != nil { if err != nil {
@ -1458,19 +1501,26 @@ func InEpsilonSlice(t TestingT, expected, actual interface{}, epsilon float64, m
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
} }
if expected == nil || actual == nil ||
reflect.TypeOf(actual).Kind() != reflect.Slice || if expected == nil || actual == nil {
reflect.TypeOf(expected).Kind() != reflect.Slice {
return Fail(t, "Parameters must be slice", msgAndArgs...) return Fail(t, "Parameters must be slice", msgAndArgs...)
} }
actualSlice := reflect.ValueOf(actual)
expectedSlice := reflect.ValueOf(expected) expectedSlice := reflect.ValueOf(expected)
actualSlice := reflect.ValueOf(actual)
for i := 0; i < actualSlice.Len(); i++ { if expectedSlice.Type().Kind() != reflect.Slice {
result := InEpsilon(t, actualSlice.Index(i).Interface(), expectedSlice.Index(i).Interface(), epsilon) return Fail(t, "Expected value must be slice", msgAndArgs...)
if !result { }
return result
expectedLen := expectedSlice.Len()
if !IsType(t, expected, actual) || !Len(t, actual, expectedLen) {
return false
}
for i := 0; i < expectedLen; i++ {
if !InEpsilon(t, expectedSlice.Index(i).Interface(), actualSlice.Index(i).Interface(), epsilon, "at index %d", i) {
return false
} }
} }
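
After this rewrite, InEpsilonSlice requires the expected value to be a slice, both arguments to share the same type and length, and it reports the failing index. A minimal sketch under those assumptions:

package example

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestInEpsilonSlice(t *testing.T) {
	expected := []float64{100, 200}
	actual := []float64{101, 199}
	// Both slices must have the same type and length; each element pair must
	// stay within the 2% relative error bound, and failures report the index.
	assert.InEpsilonSlice(t, expected, actual, 0.02)
}
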
@ -1870,23 +1920,18 @@ func (c *CollectT) Errorf(format string, args ...interface{}) {
} }
// FailNow panics. // FailNow panics.
func (c *CollectT) FailNow() { func (*CollectT) FailNow() {
panic("Assertion failed") panic("Assertion failed")
} }
// Reset clears the collected errors. // Deprecated: That was a method for internal usage that should not have been published. Now just panics.
func (c *CollectT) Reset() { func (*CollectT) Reset() {
c.errors = nil panic("Reset() is deprecated")
} }
// Copy copies the collected errors to the supplied t. // Deprecated: That was a method for internal usage that should not have been published. Now just panics.
func (c *CollectT) Copy(t TestingT) { func (*CollectT) Copy(TestingT) {
if tt, ok := t.(tHelper); ok { panic("Copy() is deprecated")
tt.Helper()
}
for _, err := range c.errors {
t.Errorf("%v", err)
}
} }
// EventuallyWithT asserts that given condition will be met in waitFor time, // EventuallyWithT asserts that given condition will be met in waitFor time,
@ -1912,8 +1957,8 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time
h.Helper() h.Helper()
} }
collect := new(CollectT) var lastFinishedTickErrs []error
ch := make(chan bool, 1) ch := make(chan []error, 1)
timer := time.NewTimer(waitFor) timer := time.NewTimer(waitFor)
defer timer.Stop() defer timer.Stop()
@ -1924,19 +1969,25 @@ func EventuallyWithT(t TestingT, condition func(collect *CollectT), waitFor time
for tick := ticker.C; ; { for tick := ticker.C; ; {
select { select {
case <-timer.C: case <-timer.C:
collect.Copy(t) for _, err := range lastFinishedTickErrs {
t.Errorf("%v", err)
}
return Fail(t, "Condition never satisfied", msgAndArgs...) return Fail(t, "Condition never satisfied", msgAndArgs...)
case <-tick: case <-tick:
tick = nil tick = nil
collect.Reset()
go func() { go func() {
collect := new(CollectT)
defer func() {
ch <- collect.errors
}()
condition(collect) condition(collect)
ch <- len(collect.errors) == 0
}() }()
case v := <-ch: case errs := <-ch:
if v { if len(errs) == 0 {
return true return true
} }
// Keep the errors from the last ended condition, so that they can be copied to t if timeout is reached.
lastFinishedTickErrs = errs
tick = ticker.C tick = ticker.C
} }
} }
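
With CollectT's Reset and Copy now panicking, the only supported pattern is the one EventuallyWithT itself uses: each tick runs the condition against a fresh CollectT, and only the errors of the last completed tick are reported on timeout. A hedged usage sketch (counter and timings invented):

package example

import (
	"sync/atomic"
	"testing"
	"time"

	"github.com/stretchr/testify/assert"
)

func TestEventuallyWithT(t *testing.T) {
	var hits atomic.Int32
	assert.EventuallyWithT(t, func(c *assert.CollectT) {
		// Each tick gets a fresh CollectT; failing ticks are simply retried
		// until waitFor elapses, at which point the last completed tick's
		// errors are copied to t.
		assert.GreaterOrEqual(c, hits.Add(1), int32(3))
	}, time.Second, 10*time.Millisecond)
}
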

View File

@ -12,7 +12,7 @@ import (
// an error if building a new request fails. // an error if building a new request fails.
func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) { func httpCode(handler http.HandlerFunc, method, url string, values url.Values) (int, error) {
w := httptest.NewRecorder() w := httptest.NewRecorder()
req, err := http.NewRequest(method, url, nil) req, err := http.NewRequest(method, url, http.NoBody)
if err != nil { if err != nil {
return -1, err return -1, err
} }
@ -32,12 +32,12 @@ func HTTPSuccess(t TestingT, handler http.HandlerFunc, method, url string, value
} }
code, err := httpCode(handler, method, url, values) code, err := httpCode(handler, method, url, values)
if err != nil { if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
} }
isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent isSuccessCode := code >= http.StatusOK && code <= http.StatusPartialContent
if !isSuccessCode { if !isSuccessCode {
Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code)) Fail(t, fmt.Sprintf("Expected HTTP success status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
} }
return isSuccessCode return isSuccessCode
@ -54,12 +54,12 @@ func HTTPRedirect(t TestingT, handler http.HandlerFunc, method, url string, valu
} }
code, err := httpCode(handler, method, url, values) code, err := httpCode(handler, method, url, values)
if err != nil { if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
} }
isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect isRedirectCode := code >= http.StatusMultipleChoices && code <= http.StatusTemporaryRedirect
if !isRedirectCode { if !isRedirectCode {
Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code)) Fail(t, fmt.Sprintf("Expected HTTP redirect status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
} }
return isRedirectCode return isRedirectCode
@ -76,12 +76,12 @@ func HTTPError(t TestingT, handler http.HandlerFunc, method, url string, values
} }
code, err := httpCode(handler, method, url, values) code, err := httpCode(handler, method, url, values)
if err != nil { if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
} }
isErrorCode := code >= http.StatusBadRequest isErrorCode := code >= http.StatusBadRequest
if !isErrorCode { if !isErrorCode {
Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code)) Fail(t, fmt.Sprintf("Expected HTTP error status code for %q but received %d", url+"?"+values.Encode(), code), msgAndArgs...)
} }
return isErrorCode return isErrorCode
@ -98,12 +98,12 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va
} }
code, err := httpCode(handler, method, url, values) code, err := httpCode(handler, method, url, values)
if err != nil { if err != nil {
Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err)) Fail(t, fmt.Sprintf("Failed to build test request, got error: %s", err), msgAndArgs...)
} }
successful := code == statuscode successful := code == statuscode
if !successful { if !successful {
Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code)) Fail(t, fmt.Sprintf("Expected HTTP status code %d for %q but received %d", statuscode, url+"?"+values.Encode(), code), msgAndArgs...)
} }
return successful return successful
@ -113,7 +113,10 @@ func HTTPStatusCode(t TestingT, handler http.HandlerFunc, method, url string, va
// empty string if building a new request fails. // empty string if building a new request fails.
func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string { func HTTPBody(handler http.HandlerFunc, method, url string, values url.Values) string {
w := httptest.NewRecorder() w := httptest.NewRecorder()
req, err := http.NewRequest(method, url+"?"+values.Encode(), nil) if len(values) > 0 {
url += "?" + values.Encode()
}
req, err := http.NewRequest(method, url, http.NoBody)
if err != nil { if err != nil {
return "" return ""
} }
@ -135,7 +138,7 @@ func HTTPBodyContains(t TestingT, handler http.HandlerFunc, method, url string,
contains := strings.Contains(body, fmt.Sprint(str)) contains := strings.Contains(body, fmt.Sprint(str))
if !contains { if !contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) Fail(t, fmt.Sprintf("Expected response body for \"%s\" to contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...)
} }
return contains return contains
@ -155,7 +158,7 @@ func HTTPBodyNotContains(t TestingT, handler http.HandlerFunc, method, url strin
contains := strings.Contains(body, fmt.Sprint(str)) contains := strings.Contains(body, fmt.Sprint(str))
if contains { if contains {
Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body)) Fail(t, fmt.Sprintf("Expected response body for \"%s\" to NOT contain \"%s\" but found \"%s\"", url+"?"+values.Encode(), str, body), msgAndArgs...)
} }
return !contains return !contains
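
The HTTP helpers now forward msgAndArgs to their failure output and build requests with http.NoBody, and HTTPBody only appends a query string when values are non-empty. A small sketch of how a caller benefits (handler and route invented):

package example

import (
	"fmt"
	"net/http"
	"testing"

	"github.com/stretchr/testify/assert"
)

func pingHandler(w http.ResponseWriter, _ *http.Request) {
	fmt.Fprint(w, "pong")
}

func TestPingEndpoint(t *testing.T) {
	// The custom message is now included in the helper's failure output.
	assert.HTTPSuccess(t, pingHandler, http.MethodGet, "/ping", nil, "ping endpoint should answer 2xx")
	// With nil values, no "?" query string is appended to the request URL anymore.
	assert.HTTPBodyContains(t, pingHandler, http.MethodGet, "/ping", nil, "pong")
}
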

View File

@ -1,7 +1,4 @@
/* // Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
package require package require
@ -235,7 +232,7 @@ func EqualExportedValuesf(t TestingT, expected interface{}, actual interface{},
t.FailNow() t.FailNow()
} }
// EqualValues asserts that two objects are equal or convertable to the same types // EqualValues asserts that two objects are equal or convertible to the same types
// and equal. // and equal.
// //
// assert.EqualValues(t, uint32(123), int32(123)) // assert.EqualValues(t, uint32(123), int32(123))
@ -249,7 +246,7 @@ func EqualValues(t TestingT, expected interface{}, actual interface{}, msgAndArg
t.FailNow() t.FailNow()
} }
// EqualValuesf asserts that two objects are equal or convertable to the same types // EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal. // and equal.
// //
// assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted") // assert.EqualValuesf(t, uint32(123), int32(123), "error message %s", "formatted")
@ -1546,6 +1543,32 @@ func NotErrorIsf(t TestingT, err error, target error, msg string, args ...interf
t.FailNow() t.FailNow()
} }
// NotImplements asserts that an object does not implement the specified interface.
//
// assert.NotImplements(t, (*MyInterface)(nil), new(MyObject))
func NotImplements(t TestingT, interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.NotImplements(t, interfaceObject, object, msgAndArgs...) {
return
}
t.FailNow()
}
// NotImplementsf asserts that an object does not implement the specified interface.
//
// assert.NotImplementsf(t, (*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func NotImplementsf(t TestingT, interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok {
h.Helper()
}
if assert.NotImplementsf(t, interfaceObject, object, msg, args...) {
return
}
t.FailNow()
}
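
The require package gains the same NotImplements/NotImplementsf pair, differing only in that a failure calls t.FailNow(). A brief sketch (noopCloser is invented):

package example

import (
	"io"
	"testing"

	"github.com/stretchr/testify/require"
)

type noopCloser struct{}

func (noopCloser) Close() error { return nil }

func TestNoWriter(t *testing.T) {
	// Passes because noopCloser has no Write method; on failure the require
	// variant would stop the test immediately via t.FailNow().
	require.NotImplementsf(t, (*io.Writer)(nil), noopCloser{}, "noopCloser must not implement %s", "io.Writer")
}
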
// NotNil asserts that the specified object is not nil. // NotNil asserts that the specified object is not nil.
// //
// assert.NotNil(t, err) // assert.NotNil(t, err)
@ -1658,10 +1681,12 @@ func NotSamef(t TestingT, expected interface{}, actual interface{}, msg string,
t.FailNow() t.FailNow()
} }
// NotSubset asserts that the specified list(array, slice...) contains not all // NotSubset asserts that the specified list(array, slice...) or map does NOT
// elements given in the specified subset(array, slice...). // contain all elements given in the specified subset list(array, slice...) or
// map.
// //
// assert.NotSubset(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") // assert.NotSubset(t, [1, 3, 4], [1, 2])
// assert.NotSubset(t, {"x": 1, "y": 2}, {"z": 3})
func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1672,10 +1697,12 @@ func NotSubset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...i
t.FailNow() t.FailNow()
} }
// NotSubsetf asserts that the specified list(array, slice...) contains not all // NotSubsetf asserts that the specified list(array, slice...) or map does NOT
// elements given in the specified subset(array, slice...). // contain all elements given in the specified subset list(array, slice...) or
// map.
// //
// assert.NotSubsetf(t, [1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") // assert.NotSubsetf(t, [1, 3, 4], [1, 2], "error message %s", "formatted")
// assert.NotSubsetf(t, {"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { func NotSubsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1880,10 +1907,11 @@ func Samef(t TestingT, expected interface{}, actual interface{}, msg string, arg
t.FailNow() t.FailNow()
} }
// Subset asserts that the specified list(array, slice...) contains all // Subset asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset(array, slice...). // elements given in the specified subset list(array, slice...) or map.
// //
// assert.Subset(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") // assert.Subset(t, [1, 2, 3], [1, 2])
// assert.Subset(t, {"x": 1, "y": 2}, {"x": 1})
func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) { func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()
@ -1894,10 +1922,11 @@ func Subset(t TestingT, list interface{}, subset interface{}, msgAndArgs ...inte
t.FailNow() t.FailNow()
} }
// Subsetf asserts that the specified list(array, slice...) contains all // Subsetf asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset(array, slice...). // elements given in the specified subset list(array, slice...) or map.
// //
// assert.Subsetf(t, [1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") // assert.Subsetf(t, [1, 2, 3], [1, 2], "error message %s", "formatted")
// assert.Subsetf(t, {"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) { func Subsetf(t TestingT, list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := t.(tHelper); ok { if h, ok := t.(tHelper); ok {
h.Helper() h.Helper()

View File

@ -1,7 +1,4 @@
/* // Code generated with github.com/stretchr/testify/_codegen; DO NOT EDIT.
* CODE GENERATED AUTOMATICALLY WITH github.com/stretchr/testify/_codegen
* THIS FILE MUST NOT BE EDITED BY HAND
*/
package require package require
@ -190,7 +187,7 @@ func (a *Assertions) EqualExportedValuesf(expected interface{}, actual interface
EqualExportedValuesf(a.t, expected, actual, msg, args...) EqualExportedValuesf(a.t, expected, actual, msg, args...)
} }
// EqualValues asserts that two objects are equal or convertable to the same types // EqualValues asserts that two objects are equal or convertible to the same types
// and equal. // and equal.
// //
// a.EqualValues(uint32(123), int32(123)) // a.EqualValues(uint32(123), int32(123))
@ -201,7 +198,7 @@ func (a *Assertions) EqualValues(expected interface{}, actual interface{}, msgAn
EqualValues(a.t, expected, actual, msgAndArgs...) EqualValues(a.t, expected, actual, msgAndArgs...)
} }
// EqualValuesf asserts that two objects are equal or convertable to the same types // EqualValuesf asserts that two objects are equal or convertible to the same types
// and equal. // and equal.
// //
// a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted") // a.EqualValuesf(uint32(123), int32(123), "error message %s", "formatted")
@ -1222,6 +1219,26 @@ func (a *Assertions) NotErrorIsf(err error, target error, msg string, args ...in
NotErrorIsf(a.t, err, target, msg, args...) NotErrorIsf(a.t, err, target, msg, args...)
} }
// NotImplements asserts that an object does not implement the specified interface.
//
// a.NotImplements((*MyInterface)(nil), new(MyObject))
func (a *Assertions) NotImplements(interfaceObject interface{}, object interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NotImplements(a.t, interfaceObject, object, msgAndArgs...)
}
// NotImplementsf asserts that an object does not implement the specified interface.
//
// a.NotImplementsf((*MyInterface)(nil), new(MyObject), "error message %s", "formatted")
func (a *Assertions) NotImplementsf(interfaceObject interface{}, object interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok {
h.Helper()
}
NotImplementsf(a.t, interfaceObject, object, msg, args...)
}
// NotNil asserts that the specified object is not nil. // NotNil asserts that the specified object is not nil.
// //
// a.NotNil(err) // a.NotNil(err)
@ -1310,10 +1327,12 @@ func (a *Assertions) NotSamef(expected interface{}, actual interface{}, msg stri
NotSamef(a.t, expected, actual, msg, args...) NotSamef(a.t, expected, actual, msg, args...)
} }
// NotSubset asserts that the specified list(array, slice...) contains not all // NotSubset asserts that the specified list(array, slice...) or map does NOT
// elements given in the specified subset(array, slice...). // contain all elements given in the specified subset list(array, slice...) or
// map.
// //
// a.NotSubset([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]") // a.NotSubset([1, 3, 4], [1, 2])
// a.NotSubset({"x": 1, "y": 2}, {"z": 3})
func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1321,10 +1340,12 @@ func (a *Assertions) NotSubset(list interface{}, subset interface{}, msgAndArgs
NotSubset(a.t, list, subset, msgAndArgs...) NotSubset(a.t, list, subset, msgAndArgs...)
} }
// NotSubsetf asserts that the specified list(array, slice...) contains not all // NotSubsetf asserts that the specified list(array, slice...) or map does NOT
// elements given in the specified subset(array, slice...). // contain all elements given in the specified subset list(array, slice...) or
// map.
// //
// a.NotSubsetf([1, 3, 4], [1, 2], "But [1, 3, 4] does not contain [1, 2]", "error message %s", "formatted") // a.NotSubsetf([1, 3, 4], [1, 2], "error message %s", "formatted")
// a.NotSubsetf({"x": 1, "y": 2}, {"z": 3}, "error message %s", "formatted")
func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { func (a *Assertions) NotSubsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1484,10 +1505,11 @@ func (a *Assertions) Samef(expected interface{}, actual interface{}, msg string,
Samef(a.t, expected, actual, msg, args...) Samef(a.t, expected, actual, msg, args...)
} }
// Subset asserts that the specified list(array, slice...) contains all // Subset asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset(array, slice...). // elements given in the specified subset list(array, slice...) or map.
// //
// a.Subset([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]") // a.Subset([1, 2, 3], [1, 2])
// a.Subset({"x": 1, "y": 2}, {"x": 1})
func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) { func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()
@ -1495,10 +1517,11 @@ func (a *Assertions) Subset(list interface{}, subset interface{}, msgAndArgs ...
Subset(a.t, list, subset, msgAndArgs...) Subset(a.t, list, subset, msgAndArgs...)
} }
// Subsetf asserts that the specified list(array, slice...) contains all // Subsetf asserts that the specified list(array, slice...) or map contains all
// elements given in the specified subset(array, slice...). // elements given in the specified subset list(array, slice...) or map.
// //
// a.Subsetf([1, 2, 3], [1, 2], "But [1, 2, 3] does contain [1, 2]", "error message %s", "formatted") // a.Subsetf([1, 2, 3], [1, 2], "error message %s", "formatted")
// a.Subsetf({"x": 1, "y": 2}, {"x": 1}, "error message %s", "formatted")
func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) { func (a *Assertions) Subsetf(list interface{}, subset interface{}, msg string, args ...interface{}) {
if h, ok := a.t.(tHelper); ok { if h, ok := a.t.(tHelper); ok {
h.Helper() h.Helper()

View File

@ -3,3 +3,5 @@ fo
te te
collison collison
consequentially consequentially
ans
nam

View File

@ -8,6 +8,192 @@ This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.htm
## [Unreleased] ## [Unreleased]
## [1.26.0/0.48.0/0.2.0-alpha] 2024-04-24
### Added
- Add `Recorder` in `go.opentelemetry.io/otel/log/logtest` to facilitate testing the log bridge implementations. (#5134)
- Add span flags to OTLP spans and links exported by `go.opentelemetry.io/otel/exporters/otlp/otlptrace`. (#5194)
- Make the initial alpha release of `go.opentelemetry.io/otel/sdk/log`.
This new module contains the Go implementation of the OpenTelemetry Logs SDK.
This module is unstable and breaking changes may be introduced.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/otlp/otlplog/otlploghttp`.
This new module contains an OTLP exporter that transmits log telemetry using HTTP.
This module is unstable and breaking changes may be introduced.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
- Make the initial alpha release of `go.opentelemetry.io/otel/exporters/stdout/stdoutlog`.
This new module contains an exporter that prints log records to STDOUT.
This module is unstable and breaking changes may be introduced.
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees. (#5240)
- The `go.opentelemetry.io/otel/semconv/v1.25.0` package.
The package contains semantic conventions from the `v1.25.0` version of the OpenTelemetry Semantic Conventions. (#5254)
### Changed
- Update `go.opentelemetry.io/proto/otlp` from v1.1.0 to v1.2.0. (#5177)
- Improve performance of baggage member character validation in `go.opentelemetry.io/otel/baggage`. (#5214)
## [1.25.0/0.47.0/0.0.8/0.1.0-alpha] 2024-04-05
### Added
- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4906)
- Add `WithProxy` option in `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlptracehttp`. (#4906)
- Add `AddLink` method to the `Span` interface in `go.opentelemetry.io/otel/trace`. (#5032)
- The `Enabled` method is added to the `Logger` interface in `go.opentelemetry.io/otel/log`.
This method is used to notify users if a log record will be emitted or not. (#5071)
- Add `SeverityUndefined` `const` to `go.opentelemetry.io/otel/log`.
This value represents an unset severity level. (#5072)
- Add `Empty` function in `go.opentelemetry.io/otel/log` to return a `KeyValue` for an empty value. (#5076)
- Add `go.opentelemetry.io/otel/log/global` to manage the global `LoggerProvider`.
This package is provided with the anticipation that all functionality will be migrated to `go.opentelemetry.io/otel` when `go.opentelemetry.io/otel/log` stabilizes.
At that point, users will be required to migrate their code, and this package will be deprecated and then removed. (#5085)
- Add support for `Summary` metrics in the `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` exporters. (#5100)
- Add `otel.scope.name` and `otel.scope.version` tags to spans exported by `go.opentelemetry.io/otel/exporters/zipkin`. (#5108)
- Add support for `AddLink` to `go.opentelemetry.io/otel/bridge/opencensus`. (#5116)
- Add `String` method to `Value` and `KeyValue` in `go.opentelemetry.io/otel/log`. (#5117)
- Add Exemplar support to `go.opentelemetry.io/otel/exporters/prometheus`. (#5111)
- Add metric semantic conventions to `go.opentelemetry.io/otel/semconv/v1.24.0`. Future `semconv` packages will include metric semantic conventions as well. (#4528)
### Changed
- `SpanFromContext` and `SpanContextFromContext` in `go.opentelemetry.io/otel/trace` no longer make a heap allocation when the passed context has no span. (#5049)
- `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` and `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` now create a gRPC client in idle mode and with "dns" as the default resolver using [`grpc.NewClient`](https://pkg.go.dev/google.golang.org/grpc#NewClient). (#5151)
Because of that `WithDialOption` ignores [`grpc.WithBlock`](https://pkg.go.dev/google.golang.org/grpc#WithBlock), [`grpc.WithTimeout`](https://pkg.go.dev/google.golang.org/grpc#WithTimeout), and [`grpc.WithReturnConnectionError`](https://pkg.go.dev/google.golang.org/grpc#WithReturnConnectionError).
Notice that [`grpc.DialContext`](https://pkg.go.dev/google.golang.org/grpc#DialContext) which was used before is now deprecated.
### Fixed
- Clarify the documentation about equivalence guarantees for the `Set` and `Distinct` types in `go.opentelemetry.io/otel/attribute`. (#5027)
- Prevent default `ErrorHandler` self-delegation. (#5137)
- Update all dependencies to address [GO-2024-2687]. (#5139)
### Removed
- Drop support for [Go 1.20]. (#4967)
### Deprecated
- Deprecate `go.opentelemetry.io/otel/attribute.Sortable` type. (#4734)
- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortable` function. (#4734)
- Deprecate `go.opentelemetry.io/otel/attribute.NewSetWithSortableFiltered` function. (#4734)
## [1.24.0/0.46.0/0.0.1-alpha] 2024-02-23
This release is the last to support [Go 1.20].
The next release will require at least [Go 1.21].
### Added
- Support [Go 1.22]. (#4890)
- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`. (#4900)
- Add exemplar support to `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`. (#4900)
- The `go.opentelemetry.io/otel/log` module is added.
This module includes OpenTelemetry Go's implementation of the Logs Bridge API.
This module is in an alpha state, it is subject to breaking changes.
See our [versioning policy](./VERSIONING.md) for more info. (#4961)
- ARM64 platform to the compatibility testing suite. (#4994)
### Fixed
- Fix registration of multiple callbacks when using the global meter provider from `go.opentelemetry.io/otel`. (#4945)
- Fix negative buckets in output of exponential histograms. (#4956)
## [1.23.1] 2024-02-07
### Fixed
- Register all callbacks passed during observable instrument creation instead of just the last one multiple times in `go.opentelemetry.io/otel/sdk/metric`. (#4888)
## [1.23.0] 2024-02-06
This release contains the first stable, `v1`, release of the following modules:
- `go.opentelemetry.io/otel/bridge/opencensus`
- `go.opentelemetry.io/otel/bridge/opencensus/test`
- `go.opentelemetry.io/otel/example/opencensus`
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
### Added
- Add `WithEndpointURL` option to the `exporters/otlp/otlpmetric/otlpmetricgrpc`, `exporters/otlp/otlpmetric/otlpmetrichttp`, `exporters/otlp/otlptrace/otlptracegrpc` and `exporters/otlp/otlptrace/otlptracehttp` packages. (#4808)
- Experimental exemplar exporting is added to the metric SDK.
See [metric documentation](./sdk/metric/internal/x/README.md#exemplars) for more information about this feature and how to enable it. (#4871)
- `ErrSchemaURLConflict` is added to `go.opentelemetry.io/otel/sdk/resource`.
This error is returned when a merge of two `Resource`s with different (non-empty) schema URL is attempted. (#4876)
### Changed
- The `Merge` and `New` functions in `go.opentelemetry.io/otel/sdk/resource` now return a partial result if there is a schema URL merge conflict.
Instead of returning `nil` when two `Resource`s with different (non-empty) schema URLs are merged, the merged `Resource`, along with the new `ErrSchemaURLConflict` error, is returned.
It is up to the user to decide if they want to use the returned `Resource` or not.
It may have desired attributes overwritten or include stale semantic conventions. (#4876)
### Fixed
- Fix `ContainerID` resource detection on systemd when cgroup path has a colon. (#4449)
- Fix `go.opentelemetry.io/otel/sdk/metric` to cache instruments to avoid leaking memory when the same instrument is created multiple times. (#4820)
- Fix missing `Min` and `Max` values for `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` by introducing `MarshalText` and `MarshalJSON` for the `Extrema` type in `go.opentelemetry.io/sdk/metric/metricdata`. (#4827)
## [1.23.0-rc.1] 2024-01-18
This is a release candidate for the v1.23.0 release.
That release is expected to include the `v1` release of the following modules:
- `go.opentelemetry.io/otel/bridge/opencensus`
- `go.opentelemetry.io/otel/bridge/opencensus/test`
- `go.opentelemetry.io/otel/example/opencensus`
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc`
- `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp`
- `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric`
See our [versioning policy](VERSIONING.md) for more information about these stability guarantees.
## [1.22.0/0.45.0] 2024-01-17
### Added
- The `go.opentelemetry.io/otel/semconv/v1.22.0` package.
The package contains semantic conventions from the `v1.22.0` version of the OpenTelemetry Semantic Conventions. (#4735)
- The `go.opentelemetry.io/otel/semconv/v1.23.0` package.
The package contains semantic conventions from the `v1.23.0` version of the OpenTelemetry Semantic Conventions. (#4746)
- The `go.opentelemetry.io/otel/semconv/v1.23.1` package.
The package contains semantic conventions from the `v1.23.1` version of the OpenTelemetry Semantic Conventions. (#4749)
- The `go.opentelemetry.io/otel/semconv/v1.24.0` package.
The package contains semantic conventions from the `v1.24.0` version of the OpenTelemetry Semantic Conventions. (#4770)
- Add `WithResourceAsConstantLabels` option to apply resource attributes for every metric emitted by the Prometheus exporter. (#4733)
- Experimental cardinality limiting is added to the metric SDK.
See [metric documentation](./sdk/metric/internal/x/README.md#cardinality-limit) for more information about this feature and how to enable it. (#4457)
- Add `NewMemberRaw` and `NewKeyValuePropertyRaw` in `go.opentelemetry.io/otel/baggage`. (#4804)
### Changed
- Upgrade all use of `go.opentelemetry.io/otel/semconv` to use `v1.24.0`. (#4754)
- Update transformations in `go.opentelemetry.io/otel/exporters/zipkin` to follow `v1.24.0` version of the OpenTelemetry specification. (#4754)
- Record synchronous measurements when the passed context is canceled instead of dropping in `go.opentelemetry.io/otel/sdk/metric`.
If you do not want to make a measurement when the context is cancelled, you need to handle it yourself (e.g. `if ctx.Err() != nil`). (#4671)
- Improve `go.opentelemetry.io/otel/trace.TraceState`'s performance. (#4722)
- Improve `go.opentelemetry.io/otel/propagation.TraceContext`'s performance. (#4721)
- Improve `go.opentelemetry.io/otel/baggage` performance. (#4743)
- Improve performance of the `(*Set).Filter` method in `go.opentelemetry.io/otel/attribute` when the passed filter does not filter out any attributes from the set. (#4774)
- `Member.String` in `go.opentelemetry.io/otel/baggage` percent-encodes only when necessary. (#4775)
- Improve `go.opentelemetry.io/otel/trace.Span`'s performance when adding multiple attributes. (#4818)
- `Property.Value` in `go.opentelemetry.io/otel/baggage` now returns a raw string instead of a percent-encoded value. (#4804)
### Fixed
- Fix `Parse` in `go.opentelemetry.io/otel/baggage` to validate member value before percent-decoding. (#4755)
- Fix whitespace encoding of `Member.String` in `go.opentelemetry.io/otel/baggage`. (#4756)
- Fix observable not registered error when the asynchronous instrument has a drop aggregation in `go.opentelemetry.io/otel/sdk/metric`. (#4772)
- Fix baggage item key so that it is not canonicalized in `go.opentelemetry.io/otel/bridge/opentracing`. (#4776)
- Fix `go.opentelemetry.io/otel/bridge/opentracing` to properly handle baggage values that require escaping during propagation. (#4804)
- Fix a bug where using multiple readers resulted in incorrect asynchronous counter values in `go.opentelemetry.io/otel/sdk/metric`. (#4742)
## [1.21.0/0.44.0] 2023-11-16 ## [1.21.0/0.44.0] 2023-11-16
### Removed ### Removed
@ -2735,7 +2921,14 @@ It contains api and sdk for trace and meter.
- CircleCI build CI manifest files. - CircleCI build CI manifest files.
- CODEOWNERS file to track owners of this project. - CODEOWNERS file to track owners of this project.
[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.21.0...HEAD [Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.26.0...HEAD
[1.26.0/0.48.0/0.2.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.26.0
[1.25.0/0.47.0/0.0.8/0.1.0-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.25.0
[1.24.0/0.46.0/0.0.1-alpha]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.24.0
[1.23.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.1
[1.23.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0
[1.23.0-rc.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.23.0-rc.1
[1.22.0/0.45.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.22.0
[1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0 [1.21.0/0.44.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.21.0
[1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0 [1.20.0/0.43.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.20.0
[1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0 [1.19.0/0.42.0/0.0.7]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.19.0
@ -2809,6 +3002,8 @@ It contains api and sdk for trace and meter.
[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 [0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1
[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 [0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0
[Go 1.22]: https://go.dev/doc/go1.22
[Go 1.21]: https://go.dev/doc/go1.21
[Go 1.20]: https://go.dev/doc/go1.20 [Go 1.20]: https://go.dev/doc/go1.20
[Go 1.19]: https://go.dev/doc/go1.19 [Go 1.19]: https://go.dev/doc/go1.19
[Go 1.18]: https://go.dev/doc/go1.18 [Go 1.18]: https://go.dev/doc/go1.18
@ -2816,3 +3011,5 @@ It contains api and sdk for trace and meter.
[metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric [metric API]:https://pkg.go.dev/go.opentelemetry.io/otel/metric
[metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric [metric SDK]:https://pkg.go.dev/go.opentelemetry.io/otel/sdk/metric
[trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace [trace API]:https://pkg.go.dev/go.opentelemetry.io/otel/trace
[GO-2024-2687]: https://pkg.go.dev/vuln/GO-2024-2687

View File

@ -14,4 +14,4 @@
* @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu * @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @MadVikingGod @pellared @hanyuancheung @dmathieu
CODEOWNERS @MrAlias @MadVikingGod @pellared CODEOWNERS @MrAlias @MadVikingGod @pellared @dashpole

View File

@ -201,6 +201,16 @@ You can install and run a "local Go Doc site" in the following way:
[`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric) [`go.opentelemetry.io/otel/metric`](https://pkg.go.dev/go.opentelemetry.io/otel/metric)
is an example of a very well-documented package. is an example of a very well-documented package.
### README files
Each (non-internal, non-test, non-documentation) package must contain a
`README.md` file containing at least a title and a `pkg.go.dev` badge.
The README should not be a repetition of Go doc comments.
You can verify the presence of all README files with the `make verify-readmes`
command.
## Style Guide ## Style Guide
One of the primary goals of this project is that it is actually used by One of the primary goals of this project is that it is actually used by
@ -591,25 +601,46 @@ this.
[^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548 [^3]: https://github.com/open-telemetry/opentelemetry-go/issues/3548
### Ignoring context cancellation
OpenTelemetry API implementations need to ignore the cancellation of the contexts that are
passed when recording a value (e.g. starting a span, recording a measurement, emitting a log).
Recording methods should not return an error describing the cancellation state of the context
when they complete, nor should they abort any work.
This rule may not apply if the OpenTelemetry specification defines a timeout mechanism for
the method. In that case the context cancellation can be used for the timeout with the
restriction that this behavior is documented for the method. Otherwise, timeouts
are expected to be handled by the user calling the API, not the implementation.
Stoppage of the telemetry pipeline is handled by calling the appropriate `Shutdown` method
of a provider. It is assumed the context passed from a user is not used for this purpose.
Outside of the direct recording of telemetry from the API (e.g. exporting telemetry,
force flushing telemetry, shutting down a signal provider) the context cancellation
should be honored. This means all work done on behalf of the user provided context
should be canceled.
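
As a rough illustration of this guideline (not code from this repository; fakeRecorder is an invented type), a recording path ignores cancellation while Shutdown honors it:

package example

import "context"

// fakeRecorder is an invented type used only to illustrate the guideline; it
// is not part of the OpenTelemetry API.
type fakeRecorder struct {
	values []float64
}

// Record ignores cancellation of the caller's context: the value is recorded
// and no error is surfaced.
func (r *fakeRecorder) Record(ctx context.Context, v float64) {
	_ = ctx // deliberately not checked on the recording path
	r.values = append(r.values, v)
}

// Shutdown is not a recording path, so it honors the caller's context.
func (r *fakeRecorder) Shutdown(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	default:
		r.values = nil
		return nil
	}
}
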
## Approvers and Maintainers ## Approvers and Maintainers
### Approvers ### Approvers
- [Evan Torrie](https://github.com/evantorrie), Verizon Media - [Evan Torrie](https://github.com/evantorrie), Verizon Media
- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics - [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics
- [David Ashpole](https://github.com/dashpole), Google
- [Chester Cheung](https://github.com/hanyuancheung), Tencent - [Chester Cheung](https://github.com/hanyuancheung), Tencent
- [Damien Mathieu](https://github.com/dmathieu), Elastic - [Damien Mathieu](https://github.com/dmathieu), Elastic
- [Anthony Mirabella](https://github.com/Aneurysm9), AWS - [Anthony Mirabella](https://github.com/Aneurysm9), AWS
### Maintainers ### Maintainers
- [David Ashpole](https://github.com/dashpole), Google
- [Aaron Clawson](https://github.com/MadVikingGod), LightStep - [Aaron Clawson](https://github.com/MadVikingGod), LightStep
- [Robert Pająk](https://github.com/pellared), Splunk - [Robert Pająk](https://github.com/pellared), Splunk
- [Tyler Yahn](https://github.com/MrAlias), Splunk - [Tyler Yahn](https://github.com/MrAlias), Splunk
### Emeritus ### Emeritus
- [Liz Fong-Jones](https://github.com/lizthegrey), Honeycomb
- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep - [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep
- [Josh MacDonald](https://github.com/jmacd), LightStep - [Josh MacDonald](https://github.com/jmacd), LightStep

View File

@ -1,16 +1,5 @@
# Copyright The OpenTelemetry Authors # Copyright The OpenTelemetry Authors
# # SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
TOOLS_MOD_DIR := ./internal/tools TOOLS_MOD_DIR := ./internal/tools
@ -25,8 +14,8 @@ TIMEOUT = 60
.DEFAULT_GOAL := precommit .DEFAULT_GOAL := precommit
.PHONY: precommit ci .PHONY: precommit ci
precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix test-default precommit: generate dependabot-generate license-check misspell go-mod-tidy golangci-lint-fix verify-readmes test-default
ci: generate dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage ci: generate dependabot-check license-check lint vanity-import-check verify-readmes build test-default check-clean-work-tree test-coverage
# Tools # Tools
@ -34,7 +23,7 @@ TOOLS = $(CURDIR)/.tools
$(TOOLS): $(TOOLS):
@mkdir -p $@ @mkdir -p $@
$(TOOLS)/%: | $(TOOLS) $(TOOLS)/%: $(TOOLS_MOD_DIR)/go.mod | $(TOOLS)
cd $(TOOLS_MOD_DIR) && \ cd $(TOOLS_MOD_DIR) && \
$(GO) build -o $@ $(PACKAGE) $(GO) build -o $@ $(PACKAGE)
@ -110,7 +99,7 @@ $(PYTOOLS):
@$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip" @$(DOCKERPY) bash -c "python3 -m venv $(VENVDIR) && $(PIP) install --upgrade pip"
# Install python packages into the virtual environment. # Install python packages into the virtual environment.
$(PYTOOLS)/%: | $(PYTOOLS) $(PYTOOLS)/%: $(PYTOOLS)
@$(DOCKERPY) $(PIP) install -r requirements.txt @$(DOCKERPY) $(PIP) install -r requirements.txt
CODESPELL = $(PYTOOLS)/codespell CODESPELL = $(PYTOOLS)/codespell
@ -124,18 +113,18 @@ generate: go-generate vanity-import-fix
.PHONY: go-generate .PHONY: go-generate
go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%) go-generate: $(OTEL_GO_MOD_DIRS:%=go-generate/%)
go-generate/%: DIR=$* go-generate/%: DIR=$*
go-generate/%: | $(STRINGER) $(GOTMPL) go-generate/%: $(STRINGER) $(GOTMPL)
@echo "$(GO) generate $(DIR)/..." \ @echo "$(GO) generate $(DIR)/..." \
&& cd $(DIR) \ && cd $(DIR) \
&& PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && PATH="$(TOOLS):$${PATH}" $(GO) generate ./...
.PHONY: vanity-import-fix .PHONY: vanity-import-fix
vanity-import-fix: | $(PORTO) vanity-import-fix: $(PORTO)
@$(PORTO) --include-internal -w . @$(PORTO) --include-internal -w .
# Generate go.work file for local development. # Generate go.work file for local development.
.PHONY: go-work .PHONY: go-work
go-work: | $(CROSSLINK) go-work: $(CROSSLINK)
$(CROSSLINK) work --root=$(shell pwd) $(CROSSLINK) work --root=$(shell pwd)
# Build # Build
@ -178,7 +167,7 @@ test/%:
COVERAGE_MODE = atomic COVERAGE_MODE = atomic
COVERAGE_PROFILE = coverage.out COVERAGE_PROFILE = coverage.out
.PHONY: test-coverage .PHONY: test-coverage
test-coverage: | $(GOCOVMERGE) test-coverage: $(GOCOVMERGE)
@set -e; \ @set -e; \
printf "" > coverage.txt; \ printf "" > coverage.txt; \
for dir in $(ALL_COVERAGE_MOD_DIRS); do \ for dir in $(ALL_COVERAGE_MOD_DIRS); do \
@ -192,7 +181,7 @@ test-coverage: | $(GOCOVMERGE)
done; \ done; \
$(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt $(GOCOVMERGE) $$(find . -name coverage.out) > coverage.txt
# Adding a directory will include all benchmarks in that direcotry if a filter is not specified. # Adding a directory will include all benchmarks in that directory if a filter is not specified.
BENCHMARK_TARGETS := sdk/trace BENCHMARK_TARGETS := sdk/trace
.PHONY: benchmark .PHONY: benchmark
benchmark: $(BENCHMARK_TARGETS:%=benchmark/%) benchmark: $(BENCHMARK_TARGETS:%=benchmark/%)
@ -209,23 +198,23 @@ golangci-lint-fix: ARGS=--fix
golangci-lint-fix: golangci-lint golangci-lint-fix: golangci-lint
golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%)
golangci-lint/%: DIR=$* golangci-lint/%: DIR=$*
golangci-lint/%: | $(GOLANGCI_LINT) golangci-lint/%: $(GOLANGCI_LINT)
@echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \
&& cd $(DIR) \ && cd $(DIR) \
&& $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS)
.PHONY: crosslink .PHONY: crosslink
crosslink: | $(CROSSLINK) crosslink: $(CROSSLINK)
@echo "Updating intra-repository dependencies in all go modules" \ @echo "Updating intra-repository dependencies in all go modules" \
&& $(CROSSLINK) --root=$(shell pwd) --prune && $(CROSSLINK) --root=$(shell pwd) --prune
.PHONY: go-mod-tidy .PHONY: go-mod-tidy
go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%)
go-mod-tidy/%: DIR=$* go-mod-tidy/%: DIR=$*
go-mod-tidy/%: | crosslink go-mod-tidy/%: crosslink
@echo "$(GO) mod tidy in $(DIR)" \ @echo "$(GO) mod tidy in $(DIR)" \
&& cd $(DIR) \ && cd $(DIR) \
&& $(GO) mod tidy -compat=1.20 && $(GO) mod tidy -compat=1.21
.PHONY: lint-modules .PHONY: lint-modules
lint-modules: go-mod-tidy lint-modules: go-mod-tidy
@ -234,23 +223,23 @@ lint-modules: go-mod-tidy
lint: misspell lint-modules golangci-lint govulncheck lint: misspell lint-modules golangci-lint govulncheck
.PHONY: vanity-import-check .PHONY: vanity-import-check
vanity-import-check: | $(PORTO) vanity-import-check: $(PORTO)
@$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 ) @$(PORTO) --include-internal -l . || ( echo "(run: make vanity-import-fix)"; exit 1 )
.PHONY: misspell .PHONY: misspell
misspell: | $(MISSPELL) misspell: $(MISSPELL)
@$(MISSPELL) -w $(ALL_DOCS) @$(MISSPELL) -w $(ALL_DOCS)
.PHONY: govulncheck .PHONY: govulncheck
govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%) govulncheck: $(OTEL_GO_MOD_DIRS:%=govulncheck/%)
govulncheck/%: DIR=$* govulncheck/%: DIR=$*
govulncheck/%: | $(GOVULNCHECK) govulncheck/%: $(GOVULNCHECK)
@echo "govulncheck ./... in $(DIR)" \ @echo "govulncheck ./... in $(DIR)" \
&& cd $(DIR) \ && cd $(DIR) \
&& $(GOVULNCHECK) ./... && $(GOVULNCHECK) ./...
.PHONY: codespell .PHONY: codespell
codespell: | $(CODESPELL) codespell: $(CODESPELL)
@$(DOCKERPY) $(CODESPELL) @$(DOCKERPY) $(CODESPELL)
.PHONY: license-check .PHONY: license-check
@ -265,11 +254,11 @@ license-check:
DEPENDABOT_CONFIG = .github/dependabot.yml DEPENDABOT_CONFIG = .github/dependabot.yml
.PHONY: dependabot-check .PHONY: dependabot-check
dependabot-check: | $(DBOTCONF) dependabot-check: $(DBOTCONF)
@$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 ) @$(DBOTCONF) verify $(DEPENDABOT_CONFIG) || ( echo "(run: make dependabot-generate)"; exit 1 )
.PHONY: dependabot-generate .PHONY: dependabot-generate
dependabot-generate: | $(DBOTCONF) dependabot-generate: $(DBOTCONF)
@$(DBOTCONF) generate > $(DEPENDABOT_CONFIG) @$(DBOTCONF) generate > $(DEPENDABOT_CONFIG)
.PHONY: check-clean-work-tree .PHONY: check-clean-work-tree
@ -284,13 +273,14 @@ check-clean-work-tree:
SEMCONVPKG ?= "semconv/" SEMCONVPKG ?= "semconv/"
.PHONY: semconv-generate .PHONY: semconv-generate
semconv-generate: | $(SEMCONVGEN) $(SEMCONVKIT) semconv-generate: $(SEMCONVGEN) $(SEMCONVKIT)
[ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 ) [ "$(TAG)" ] || ( echo "TAG unset: missing opentelemetry semantic-conventions tag"; exit 1 )
[ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 ) [ "$(OTEL_SEMCONV_REPO)" ] || ( echo "OTEL_SEMCONV_REPO unset: missing path to opentelemetry semantic-conventions repo"; exit 1 )
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=span -p conventionType=trace -f trace.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=attribute_group -p conventionType=trace -f attribute_group.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=event -p conventionType=event -f event.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)" $(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=resource -p conventionType=resource -f resource.go -t "$(SEMCONVPKG)/template.j2" -s "$(TAG)"
$(SEMCONVGEN) -i "$(OTEL_SEMCONV_REPO)/model/." --only=metric -f metric.go -t "$(SEMCONVPKG)/metric_template.j2" -s "$(TAG)"
$(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)" $(SEMCONVKIT) -output "$(SEMCONVPKG)/$(TAG)" -tag "$(TAG)"
.PHONY: gorelease .PHONY: gorelease
@ -303,16 +293,20 @@ gorelease/%:| $(GORELEASE)
|| echo "" || echo ""
.PHONY: prerelease .PHONY: prerelease
prerelease: | $(MULTIMOD) prerelease: $(MULTIMOD)
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
$(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET}
COMMIT ?= "HEAD" COMMIT ?= "HEAD"
.PHONY: add-tags .PHONY: add-tags
add-tags: | $(MULTIMOD) add-tags: $(MULTIMOD)
@[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 )
$(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT}
.PHONY: lint-markdown .PHONY: lint-markdown
lint-markdown: lint-markdown:
docker run -v "$(CURDIR):$(WORKDIR)" docker://avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md docker run -v "$(CURDIR):$(WORKDIR)" avtodev/markdown-lint:v1 -c $(WORKDIR)/.markdownlint.yaml $(WORKDIR)/**/*.md
.PHONY: verify-readmes
verify-readmes:
./verify_readmes.sh

View File

@ -11,14 +11,11 @@ It provides a set of APIs to directly measure performance and behavior of your s
## Project Status ## Project Status
| Signal | Status | | Signal | Status |
|---------|------------| |---------|--------------------|
| Traces | Stable | | Traces | Stable |
| Metrics | Stable | | Metrics | Stable |
| Logs | Design [1] | | Logs | In development[^1] |
- [1]: Currently the logs signal development is in a design phase ([#4696](https://github.com/open-telemetry/opentelemetry-go/issues/4696)).
No Logs Pull Requests are currently being accepted.
Progress and status specific to this repository is tracked in our Progress and status specific to this repository is tracked in our
[project boards](https://github.com/open-telemetry/opentelemetry-go/projects) [project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
@ -28,6 +25,8 @@ and
Project versioning information and stability guarantees can be found in the Project versioning information and stability guarantees can be found in the
[versioning documentation](VERSIONING.md). [versioning documentation](VERSIONING.md).
[^1]: https://github.com/orgs/open-telemetry/projects/43
### Compatibility ### Compatibility
OpenTelemetry-Go ensures compatibility with the current supported versions of OpenTelemetry-Go ensures compatibility with the current supported versions of
@ -50,23 +49,25 @@ Currently, this project supports the following environments.
| OS | Go Version | Architecture | | OS | Go Version | Architecture |
|---------|------------|--------------| |---------|------------|--------------|
| Ubuntu | 1.22 | amd64 |
| Ubuntu | 1.21 | amd64 | | Ubuntu | 1.21 | amd64 |
| Ubuntu | 1.20 | amd64 | | Ubuntu | 1.22 | 386 |
| Ubuntu | 1.21 | 386 | | Ubuntu | 1.21 | 386 |
| Ubuntu | 1.20 | 386 | | Linux | 1.22 | arm64 |
| Linux | 1.21 | arm64 |
| MacOS | 1.22 | amd64 |
| MacOS | 1.21 | amd64 | | MacOS | 1.21 | amd64 |
| MacOS | 1.20 | amd64 | | Windows | 1.22 | amd64 |
| Windows | 1.21 | amd64 | | Windows | 1.21 | amd64 |
| Windows | 1.20 | amd64 | | Windows | 1.22 | 386 |
| Windows | 1.21 | 386 | | Windows | 1.21 | 386 |
| Windows | 1.20 | 386 |
While this project should work for other systems, no compatibility guarantees While this project should work for other systems, no compatibility guarantees
are made for those systems currently. are made for those systems currently.
## Getting Started ## Getting Started
You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/). You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/languages/go/getting-started/).
OpenTelemetry's goal is to provide a single set of APIs to capture distributed OpenTelemetry's goal is to provide a single set of APIs to capture distributed
traces and metrics from your application and send them to an observability traces and metrics from your application and send them to an observability

View File

@ -123,12 +123,12 @@ Once verified be sure to [make a release for the `contrib` repository](https://g
### Website Documentation ### Website Documentation
Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/instrumentation/go]. Update the [Go instrumentation documentation] in the OpenTelemetry website under [content/en/docs/languages/go].
Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate. Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
[OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions [OpenTelemetry Semantic Conventions]: https://github.com/open-telemetry/semantic-conventions
[Go instrumentation documentation]: https://opentelemetry.io/docs/instrumentation/go/ [Go instrumentation documentation]: https://opentelemetry.io/docs/languages/go/
[content/en/docs/instrumentation/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/instrumentation/go [content/en/docs/languages/go]: https://github.com/open-telemetry/opentelemetry.io/tree/main/content/en/docs/languages/go
### Demo Repository ### Demo Repository

3
vendor/go.opentelemetry.io/otel/attribute/README.md generated vendored Normal file
View File

@ -0,0 +1,3 @@
# Attribute
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/attribute)](https://pkg.go.dev/go.opentelemetry.io/otel/attribute)

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package attribute provides key and value attributes. // Package attribute provides key and value attributes.
package attribute // import "go.opentelemetry.io/otel/attribute" package attribute // import "go.opentelemetry.io/otel/attribute"

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute // import "go.opentelemetry.io/otel/attribute" package attribute // import "go.opentelemetry.io/otel/attribute"

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute // import "go.opentelemetry.io/otel/attribute" package attribute // import "go.opentelemetry.io/otel/attribute"

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute // import "go.opentelemetry.io/otel/attribute" package attribute // import "go.opentelemetry.io/otel/attribute"

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute // import "go.opentelemetry.io/otel/attribute" package attribute // import "go.opentelemetry.io/otel/attribute"

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute // import "go.opentelemetry.io/otel/attribute" package attribute // import "go.opentelemetry.io/otel/attribute"

View File

@ -1,24 +1,14 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute // import "go.opentelemetry.io/otel/attribute" package attribute // import "go.opentelemetry.io/otel/attribute"
import ( import (
"cmp"
"encoding/json" "encoding/json"
"reflect" "reflect"
"slices"
"sort" "sort"
"sync"
) )
type ( type (
@ -26,23 +16,33 @@ type (
// immutable set of attributes, with an internal cache for storing // immutable set of attributes, with an internal cache for storing
// attribute encodings. // attribute encodings.
// //
// This type supports the Equivalent method of comparison using values of // This type will remain comparable for backwards compatibility. The
// type Distinct. // equivalence of Sets across versions is not guaranteed to be stable.
// Prior versions may find two Sets to be equal or not when compared
// directly (i.e. ==), but subsequent versions may not. Users should use
// the Equals method to ensure stable equivalence checking.
//
// Users should also use the Distinct returned from Equivalent as a map key
// instead of a Set directly. In addition to that type providing guarantees
// on stable equivalence, it may also provide performance improvements.
Set struct { Set struct {
equivalent Distinct equivalent Distinct
} }
// Distinct wraps a variable-size array of KeyValue, constructed with keys // Distinct is a unique identifier of a Set.
// in sorted order. This can be used as a map key or for equality checking //
// between Sets. // Distinct is designed to ensure equivalence stability: comparisons
// will return the same value across versions. For this reason, Distinct
// should always be used as a map key instead of a Set.
Distinct struct { Distinct struct {
iface interface{} iface interface{}
} }
// Sortable implements sort.Interface, used for sorting KeyValue. This is // Sortable implements sort.Interface, used for sorting KeyValue.
// an exported type to support a memory optimization. A pointer to one of //
// these is needed for the call to sort.Stable(), which the caller may // Deprecated: This type is no longer used. It was added as a performance
// provide in order to avoid an allocation. See NewSetWithSortable(). // optimization for Go < 1.21 that is no longer needed (Go < 1.21 is no
// longer supported by the module).
Sortable []KeyValue Sortable []KeyValue
) )
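For orientation (not part of the diff): a minimal sketch of the usage pattern the new comments recommend, keying a map by the Distinct returned from Equivalent rather than by the Set itself, using only the public attribute API shown above.

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
)

func main() {
	// Two sets with the same attributes in a different order are equivalent.
	a := attribute.NewSet(attribute.String("host", "db1"), attribute.Int("port", 5432))
	b := attribute.NewSet(attribute.Int("port", 5432), attribute.String("host", "db1"))

	// Key the map by Distinct, not by the Set, for stable equivalence.
	counts := map[attribute.Distinct]int{}
	counts[a.Equivalent()]++
	counts[b.Equivalent()]++

	fmt.Println(counts[a.Equivalent()]) // 2
}
```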
@ -56,12 +56,6 @@ var (
iface: [0]KeyValue{}, iface: [0]KeyValue{},
}, },
} }
// sortables is a pool of Sortables used to create Sets when a user does
// not provide one.
sortables = sync.Pool{
New: func() interface{} { return new(Sortable) },
}
) )
// EmptySet returns a reference to a Set with no elements. // EmptySet returns a reference to a Set with no elements.
@ -187,13 +181,7 @@ func empty() Set {
// Except for empty sets, this method adds an additional allocation compared // Except for empty sets, this method adds an additional allocation compared
// with calls that include a Sortable. // with calls that include a Sortable.
func NewSet(kvs ...KeyValue) Set { func NewSet(kvs ...KeyValue) Set {
// Check for empty set. s, _ := NewSetWithFiltered(kvs, nil)
if len(kvs) == 0 {
return empty()
}
srt := sortables.Get().(*Sortable)
s, _ := NewSetWithSortableFiltered(kvs, srt, nil)
sortables.Put(srt)
return s return s
} }
@ -201,12 +189,10 @@ func NewSet(kvs ...KeyValue) Set {
// NewSetWithSortableFiltered for more details. // NewSetWithSortableFiltered for more details.
// //
// This call includes a Sortable option as a memory optimization. // This call includes a Sortable option as a memory optimization.
func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { //
// Check for empty set. // Deprecated: Use [NewSet] instead.
if len(kvs) == 0 { func NewSetWithSortable(kvs []KeyValue, _ *Sortable) Set {
return empty() s, _ := NewSetWithFiltered(kvs, nil)
}
s, _ := NewSetWithSortableFiltered(kvs, tmp, nil)
return s return s
} }
@ -220,10 +206,37 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
if len(kvs) == 0 { if len(kvs) == 0 {
return empty(), nil return empty(), nil
} }
srt := sortables.Get().(*Sortable)
s, filtered := NewSetWithSortableFiltered(kvs, srt, filter) // Stable sort so the following de-duplication can implement
sortables.Put(srt) // last-value-wins semantics.
return s, filtered slices.SortStableFunc(kvs, func(a, b KeyValue) int {
return cmp.Compare(a.Key, b.Key)
})
position := len(kvs) - 1
offset := position - 1
// The requirements stated above require that the stable
// result be placed in the end of the input slice, while
// overwritten values are swapped to the beginning.
//
// De-duplicate with last-value-wins semantics. Preserve
// duplicate values at the beginning of the input slice.
for ; offset >= 0; offset-- {
if kvs[offset].Key == kvs[position].Key {
continue
}
position--
kvs[offset], kvs[position] = kvs[position], kvs[offset]
}
kvs = kvs[position:]
if filter != nil {
if div := filteredToFront(kvs, filter); div != 0 {
return Set{equivalent: computeDistinct(kvs[div:])}, kvs[:div]
}
}
return Set{equivalent: computeDistinct(kvs)}, nil
} }
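A small, hedged illustration of the last-value-wins de-duplication implemented above, as seen through the public constructor:

```go
// Assumes: import "fmt" and "go.opentelemetry.io/otel/attribute".
func lastValueWinsExample() {
	set := attribute.NewSet(
		attribute.String("env", "dev"),
		attribute.String("env", "prod"), // duplicate key: the later value wins
	)
	if v, ok := set.Value("env"); ok {
		fmt.Println(v.AsString()) // prod
	}
}
```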
// NewSetWithSortableFiltered returns a new Set. // NewSetWithSortableFiltered returns a new Set.
@ -249,82 +262,71 @@ func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
// //
// The second []KeyValue return value is a list of attributes that were // The second []KeyValue return value is a list of attributes that were
// excluded by the Filter (if non-nil). // excluded by the Filter (if non-nil).
func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) { //
// Check for empty set. // Deprecated: Use [NewSetWithFiltered] instead.
if len(kvs) == 0 { func NewSetWithSortableFiltered(kvs []KeyValue, _ *Sortable, filter Filter) (Set, []KeyValue) {
return empty(), nil return NewSetWithFiltered(kvs, filter)
}
*tmp = kvs
// Stable sort so the following de-duplication can implement
// last-value-wins semantics.
sort.Stable(tmp)
*tmp = nil
position := len(kvs) - 1
offset := position - 1
// The requirements stated above require that the stable
// result be placed in the end of the input slice, while
// overwritten values are swapped to the beginning.
//
// De-duplicate with last-value-wins semantics. Preserve
// duplicate values at the beginning of the input slice.
for ; offset >= 0; offset-- {
if kvs[offset].Key == kvs[position].Key {
continue
}
position--
kvs[offset], kvs[position] = kvs[position], kvs[offset]
}
if filter != nil {
return filterSet(kvs[position:], filter)
}
return Set{
equivalent: computeDistinct(kvs[position:]),
}, nil
} }
// filterSet reorders kvs so that included keys are contiguous at the end of // filteredToFront filters slice in-place using keep function. All KeyValues that need to
// the slice, while excluded keys precede the included keys. // be removed are moved to the front. All KeyValues that need to be kept are
func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) { // moved (in-order) to the back. The index for the first KeyValue to be kept is
var excluded []KeyValue // returned.
func filteredToFront(slice []KeyValue, keep Filter) int {
// Move attributes that do not match the filter so they're adjacent before n := len(slice)
// calling computeDistinct(). j := n
distinctPosition := len(kvs) for i := n - 1; i >= 0; i-- {
if keep(slice[i]) {
// Swap indistinct keys forward and distinct keys toward the j--
// end of the slice. slice[i], slice[j] = slice[j], slice[i]
offset := len(kvs) - 1
for ; offset >= 0; offset-- {
if filter(kvs[offset]) {
distinctPosition--
kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset]
continue
} }
} }
excluded = kvs[:distinctPosition] return j
return Set{
equivalent: computeDistinct(kvs[distinctPosition:]),
}, excluded
} }
// Filter returns a filtered copy of this Set. See the documentation for // Filter returns a filtered copy of this Set. See the documentation for
// NewSetWithSortableFiltered for more details. // NewSetWithSortableFiltered for more details.
func (l *Set) Filter(re Filter) (Set, []KeyValue) { func (l *Set) Filter(re Filter) (Set, []KeyValue) {
if re == nil { if re == nil {
return Set{ return *l, nil
equivalent: l.equivalent,
}, nil
} }
// Note: This could be refactored to avoid the temporary slice // Iterate in reverse to the first attribute that will be filtered out.
// allocation, if it proves to be expensive. n := l.Len()
return filterSet(l.ToSlice(), re) first := n - 1
for ; first >= 0; first-- {
kv, _ := l.Get(first)
if !re(kv) {
break
}
}
// No attributes will be dropped, return the immutable Set l and nil.
if first < 0 {
return *l, nil
}
// Copy now that we know we need to return a modified set.
//
// Do not do this in-place on the underlying storage of *Set l. Sets are
// immutable and filtering should not change this.
slice := l.ToSlice()
// Don't re-iterate the slice if only slice[0] is filtered.
if first == 0 {
// It is safe to assume len(slice) >= 1 given we found at least one
// attribute above that needs to be filtered out.
return Set{equivalent: computeDistinct(slice[1:])}, slice[:1]
}
// Move the filtered slice[first] to the front (preserving order).
kv := slice[first]
copy(slice[1:first+1], slice[:first])
slice[0] = kv
// Do not re-evaluate re(slice[first+1:]).
div := filteredToFront(slice[1:first+1], re) + 1
return Set{equivalent: computeDistinct(slice[div:])}, slice[:div]
} }
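A hedged sketch of how the rewritten Filter behaves from a caller's point of view; the dropped attributes are returned alongside the filtered Set:

```go
// Assumes: import "go.opentelemetry.io/otel/attribute".
func filterExample() (attribute.Set, []attribute.KeyValue) {
	set := attribute.NewSet(
		attribute.String("user", "alice"),
		attribute.String("password", "hunter2"),
	)
	// Keep everything except secret-looking keys; the second return value
	// holds the attributes that were filtered out.
	return set.Filter(func(kv attribute.KeyValue) bool {
		return kv.Key != "password"
	})
}
```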
// computeDistinct returns a Distinct using either the fixed- or // computeDistinct returns a Distinct using either the fixed- or
@ -404,7 +406,7 @@ func (l *Set) MarshalJSON() ([]byte, error) {
return json.Marshal(l.equivalent.iface) return json.Marshal(l.equivalent.iface)
} }
// MarshalLog is the marshaling function used by the logging system to represent this exporter. // MarshalLog is the marshaling function used by the logging system to represent this Set.
func (l Set) MarshalLog() interface{} { func (l Set) MarshalLog() interface{} {
kvs := make(map[string]string) kvs := make(map[string]string)
for _, kv := range l.ToSlice() { for _, kv := range l.ToSlice() {

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute // import "go.opentelemetry.io/otel/attribute" package attribute // import "go.opentelemetry.io/otel/attribute"

3
vendor/go.opentelemetry.io/otel/baggage/README.md generated vendored Normal file
View File

@ -0,0 +1,3 @@
# Baggage
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/baggage)](https://pkg.go.dev/go.opentelemetry.io/otel/baggage)

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package baggage // import "go.opentelemetry.io/otel/baggage" package baggage // import "go.opentelemetry.io/otel/baggage"
@ -18,8 +7,8 @@ import (
"errors" "errors"
"fmt" "fmt"
"net/url" "net/url"
"regexp"
"strings" "strings"
"unicode/utf8"
"go.opentelemetry.io/otel/internal/baggage" "go.opentelemetry.io/otel/internal/baggage"
) )
@ -32,16 +21,6 @@ const (
listDelimiter = "," listDelimiter = ","
keyValueDelimiter = "=" keyValueDelimiter = "="
propertyDelimiter = ";" propertyDelimiter = ";"
keyDef = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)`
valueDef = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)`
keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*`
)
var (
keyRe = regexp.MustCompile(`^` + keyDef + `$`)
valueRe = regexp.MustCompile(`^` + valueDef + `$`)
propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`)
) )
var ( var (
@ -67,7 +46,7 @@ type Property struct {
// //
// If key is invalid, an error will be returned. // If key is invalid, an error will be returned.
func NewKeyProperty(key string) (Property, error) { func NewKeyProperty(key string) (Property, error) {
if !keyRe.MatchString(key) { if !validateKey(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key) return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
} }
@ -77,14 +56,29 @@ func NewKeyProperty(key string) (Property, error) {
// NewKeyValueProperty returns a new Property for key with value. // NewKeyValueProperty returns a new Property for key with value.
// //
// If key or value are invalid, an error will be returned. // The passed key must be compliant with W3C Baggage specification.
// The passed value must be percent-encoded as defined in W3C Baggage specification.
//
// Notice: Consider using [NewKeyValuePropertyRaw] instead
// that does not require percent-encoding of the value.
func NewKeyValueProperty(key, value string) (Property, error) { func NewKeyValueProperty(key, value string) (Property, error) {
if !keyRe.MatchString(key) { if !validateValue(value) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
if !valueRe.MatchString(value) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value) return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
} }
decodedValue, err := url.PathUnescape(value)
if err != nil {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
return NewKeyValuePropertyRaw(key, decodedValue)
}
// NewKeyValuePropertyRaw returns a new Property for key with value.
//
// The passed key must be compliant with W3C Baggage specification.
func NewKeyValuePropertyRaw(key, value string) (Property, error) {
if !validateKey(key) {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
p := Property{ p := Property{
key: key, key: key,
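As a hedged usage sketch of the split introduced above: NewKeyValueProperty expects a percent-encoded value and decodes it, while NewKeyValuePropertyRaw takes the value as-is; both render back to an encoded header form via String.

```go
// Assumes: import "fmt" and "go.opentelemetry.io/otel/baggage".
func propertyExample() {
	encoded, _ := baggage.NewKeyValueProperty("tier", "front%20end") // value is decoded
	raw, _ := baggage.NewKeyValuePropertyRaw("tier", "front end")    // value used as-is

	fmt.Println(encoded.String()) // tier=front%20end
	fmt.Println(raw.String())     // tier=front%20end
}
```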
@ -106,20 +100,11 @@ func parseProperty(property string) (Property, error) {
return newInvalidProperty(), nil return newInvalidProperty(), nil
} }
match := propertyRe.FindStringSubmatch(property) p, ok := parsePropertyInternal(property)
if len(match) != 4 { if !ok {
return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property) return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
} }
var p Property
if match[1] != "" {
p.key = match[1]
} else {
p.key = match[2]
p.value = match[3]
p.hasValue = true
}
return p, nil return p, nil
} }
@ -130,12 +115,9 @@ func (p Property) validate() error {
return fmt.Errorf("invalid property: %w", err) return fmt.Errorf("invalid property: %w", err)
} }
if !keyRe.MatchString(p.key) { if !validateKey(p.key) {
return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key)) return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
} }
if p.hasValue && !valueRe.MatchString(p.value) {
return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
}
if !p.hasValue && p.value != "" { if !p.hasValue && p.value != "" {
return errFunc(errors.New("inconsistent value")) return errFunc(errors.New("inconsistent value"))
} }
@ -154,11 +136,11 @@ func (p Property) Value() (string, bool) {
return p.value, p.hasValue return p.value, p.hasValue
} }
// String encodes Property into a string compliant with the W3C Baggage // String encodes Property into a header string compliant with the W3C Baggage
// specification. // specification.
func (p Property) String() string { func (p Property) String() string {
if p.hasValue { if p.hasValue {
return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value) return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, valueEscape(p.value))
} }
return p.key return p.key
} }
@ -218,7 +200,7 @@ func (p properties) validate() error {
return nil return nil
} }
// String encodes properties into a string compliant with the W3C Baggage // String encodes properties into a header string compliant with the W3C Baggage
// specification. // specification.
func (p properties) String() string { func (p properties) String() string {
props := make([]string, len(p)) props := make([]string, len(p))
@ -240,11 +222,28 @@ type Member struct {
hasData bool hasData bool
} }
// NewMember returns a new Member from the passed arguments. The key will be // NewMember returns a new Member from the passed arguments.
// used directly while the value will be url decoded after validation. An error //
// is returned if the created Member would be invalid according to the W3C // The passed key must be compliant with W3C Baggage specification.
// Baggage specification. // The passed value must be percent-encoded as defined in W3C Baggage specification.
//
// Notice: Consider using [NewMemberRaw] instead
// that does not require percent-encoding of the value.
func NewMember(key, value string, props ...Property) (Member, error) { func NewMember(key, value string, props ...Property) (Member, error) {
if !validateValue(value) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
decodedValue, err := url.PathUnescape(value)
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
return NewMemberRaw(key, decodedValue, props...)
}
// NewMemberRaw returns a new Member from the passed arguments.
//
// The passed key must be compliant with W3C Baggage specification.
func NewMemberRaw(key, value string, props ...Property) (Member, error) {
m := Member{ m := Member{
key: key, key: key,
value: value, value: value,
@ -254,11 +253,6 @@ func NewMember(key, value string, props ...Property) (Member, error) {
if err := m.validate(); err != nil { if err := m.validate(); err != nil {
return newInvalidMember(), err return newInvalidMember(), err
} }
decodedValue, err := url.PathUnescape(value)
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
m.value = decodedValue
return m, nil return m, nil
} }
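The same split applies to members; a minimal hedged sketch (member order in the rendered header is not guaranteed):

```go
// Assumes: import "fmt" and "go.opentelemetry.io/otel/baggage".
func memberExample() {
	m1, _ := baggage.NewMemberRaw("userId", "alice smith") // raw value, escaped on output
	m2, _ := baggage.NewMember("serverNode", "DF%2028")    // percent-encoded input, decoded here

	bag, _ := baggage.New(m1, m2)
	fmt.Println(bag.String()) // e.g. userId=alice%20smith,serverNode=DF%2028
}
```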
@ -274,11 +268,7 @@ func parseMember(member string) (Member, error) {
return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n) return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n)
} }
var ( var props properties
key, value string
props properties
)
keyValue, properties, found := strings.Cut(member, propertyDelimiter) keyValue, properties, found := strings.Cut(member, propertyDelimiter)
if found { if found {
// Parse the member properties. // Parse the member properties.
@ -299,36 +289,34 @@ func parseMember(member string) (Member, error) {
} }
// "Leading and trailing whitespaces are allowed but MUST be trimmed // "Leading and trailing whitespaces are allowed but MUST be trimmed
// when converting the header into a data structure." // when converting the header into a data structure."
key = strings.TrimSpace(k) key := strings.TrimSpace(k)
var err error if !validateKey(key) {
value, err = url.PathUnescape(strings.TrimSpace(v))
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
}
if !keyRe.MatchString(key) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
} }
if !valueRe.MatchString(value) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) val := strings.TrimSpace(v)
if !validateValue(val) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, v)
} }
// Decode a percent-encoded value.
value, err := url.PathUnescape(val)
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %v", errInvalidValue, err)
}
return Member{key: key, value: value, properties: props, hasData: true}, nil return Member{key: key, value: value, properties: props, hasData: true}, nil
} }
// validate ensures m conforms to the W3C Baggage specification. // validate ensures m conforms to the W3C Baggage specification.
// A key is just an ASCII string, but a value must be URL encoded UTF-8, // A key must be an ASCII string, returning an error otherwise.
// returning an error otherwise.
func (m Member) validate() error { func (m Member) validate() error {
if !m.hasData { if !m.hasData {
return fmt.Errorf("%w: %q", errInvalidMember, m) return fmt.Errorf("%w: %q", errInvalidMember, m)
} }
if !keyRe.MatchString(m.key) { if !validateKey(m.key) {
return fmt.Errorf("%w: %q", errInvalidKey, m.key) return fmt.Errorf("%w: %q", errInvalidKey, m.key)
} }
if !valueRe.MatchString(m.value) {
return fmt.Errorf("%w: %q", errInvalidValue, m.value)
}
return m.properties.validate() return m.properties.validate()
} }
@ -341,11 +329,13 @@ func (m Member) Value() string { return m.value }
// Properties returns a copy of the Member properties. // Properties returns a copy of the Member properties.
func (m Member) Properties() []Property { return m.properties.Copy() } func (m Member) Properties() []Property { return m.properties.Copy() }
// String encodes Member into a string compliant with the W3C Baggage // String encodes Member into a header string compliant with the W3C Baggage
// specification. // specification.
func (m Member) String() string { func (m Member) String() string {
// A key is just an ASCII string, but a value is URL encoded UTF-8. // A key is just an ASCII string. A value is restricted to be
s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value)) // US-ASCII characters excluding CTLs, whitespace,
// DQUOTE, comma, semicolon, and backslash.
s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, valueEscape(m.value))
if len(m.properties) > 0 { if len(m.properties) > 0 {
s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
} }
@ -536,9 +526,8 @@ func (b Baggage) Len() int {
return len(b.list) return len(b.list)
} }
// String encodes Baggage into a string compliant with the W3C Baggage // String encodes Baggage into a header string compliant with the W3C Baggage
// specification. The returned string will be invalid if the Baggage contains // specification.
// any invalid list-members.
func (b Baggage) String() string { func (b Baggage) String() string {
members := make([]string, 0, len(b.list)) members := make([]string, 0, len(b.list))
for k, v := range b.list { for k, v := range b.list {
@ -550,3 +539,372 @@ func (b Baggage) String() string {
} }
return strings.Join(members, listDelimiter) return strings.Join(members, listDelimiter)
} }
// parsePropertyInternal attempts to decode a Property from the passed string.
// It follows the spec at https://www.w3.org/TR/baggage/#definition.
func parsePropertyInternal(s string) (p Property, ok bool) {
// For the entire function we will use " key = value " as an example.
// Attempting to parse the key.
// First skip spaces at the beginning "< >key = value " (they could be empty).
index := skipSpace(s, 0)
// Parse the key: " <key> = value ".
keyStart := index
keyEnd := index
for _, c := range s[keyStart:] {
if !validateKeyChar(c) {
break
}
keyEnd++
}
// If we couldn't find any valid key character,
// it means the key is either empty or invalid.
if keyStart == keyEnd {
return
}
// Skip spaces after the key: " key< >= value ".
index = skipSpace(s, keyEnd)
if index == len(s) {
// A key can have no value, like: " key ".
ok = true
p.key = s[keyStart:keyEnd]
return
}
// If we have not reached the end and we can't find the '=' delimiter,
// it means the property is invalid.
if s[index] != keyValueDelimiter[0] {
return
}
// Attempting to parse the value.
// Match: " key =< >value ".
index = skipSpace(s, index+1)
// Match the value string: " key = <value> ".
// A valid property can be: " key =".
// Therefore, we don't have to check if the value is empty.
valueStart := index
valueEnd := index
for _, c := range s[valueStart:] {
if !validateValueChar(c) {
break
}
valueEnd++
}
// Skip all trailing whitespaces: " key = value< >".
index = skipSpace(s, valueEnd)
// If after looking for the value and skipping whitespaces
// we have not reached the end, it means the property is
// invalid, something like: " key = value value1".
if index != len(s) {
return
}
// Decode a percent-encoded value.
value, err := url.PathUnescape(s[valueStart:valueEnd])
if err != nil {
return
}
ok = true
p.key = s[keyStart:keyEnd]
p.hasValue = true
p.value = value
return
}
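The parser above is exercised indirectly through baggage.Parse; a hedged sketch of the round trip for a member that carries a property:

```go
// Assumes: import "fmt" and "go.opentelemetry.io/otel/baggage".
func parseExample() {
	bag, err := baggage.Parse("userId=alice;isTrial=true, serverNode=DF%2028")
	if err != nil {
		panic(err)
	}
	m := bag.Member("userId")
	fmt.Println(m.Value()) // alice
	for _, p := range m.Properties() {
		fmt.Println(p.Key()) // isTrial
	}
}
```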
func skipSpace(s string, offset int) int {
i := offset
for ; i < len(s); i++ {
c := s[i]
if c != ' ' && c != '\t' {
break
}
}
return i
}
var safeKeyCharset = [utf8.RuneSelf]bool{
// 0x23 to 0x27
'#': true,
'$': true,
'%': true,
'&': true,
'\'': true,
// 0x30 to 0x39
'0': true,
'1': true,
'2': true,
'3': true,
'4': true,
'5': true,
'6': true,
'7': true,
'8': true,
'9': true,
// 0x41 to 0x5a
'A': true,
'B': true,
'C': true,
'D': true,
'E': true,
'F': true,
'G': true,
'H': true,
'I': true,
'J': true,
'K': true,
'L': true,
'M': true,
'N': true,
'O': true,
'P': true,
'Q': true,
'R': true,
'S': true,
'T': true,
'U': true,
'V': true,
'W': true,
'X': true,
'Y': true,
'Z': true,
// 0x5e to 0x7a
'^': true,
'_': true,
'`': true,
'a': true,
'b': true,
'c': true,
'd': true,
'e': true,
'f': true,
'g': true,
'h': true,
'i': true,
'j': true,
'k': true,
'l': true,
'm': true,
'n': true,
'o': true,
'p': true,
'q': true,
'r': true,
's': true,
't': true,
'u': true,
'v': true,
'w': true,
'x': true,
'y': true,
'z': true,
// remainder
'!': true,
'*': true,
'+': true,
'-': true,
'.': true,
'|': true,
'~': true,
}
func validateKey(s string) bool {
if len(s) == 0 {
return false
}
for _, c := range s {
if !validateKeyChar(c) {
return false
}
}
return true
}
func validateKeyChar(c int32) bool {
return c >= 0 && c <= int32(utf8.RuneSelf) && safeKeyCharset[c]
}
func validateValue(s string) bool {
for _, c := range s {
if !validateValueChar(c) {
return false
}
}
return true
}
var safeValueCharset = [utf8.RuneSelf]bool{
'!': true, // 0x21
// 0x23 to 0x2b
'#': true,
'$': true,
'%': true,
'&': true,
'\'': true,
'(': true,
')': true,
'*': true,
'+': true,
// 0x2d to 0x3a
'-': true,
'.': true,
'/': true,
'0': true,
'1': true,
'2': true,
'3': true,
'4': true,
'5': true,
'6': true,
'7': true,
'8': true,
'9': true,
':': true,
// 0x3c to 0x5b
'<': true, // 0x3C
'=': true, // 0x3D
'>': true, // 0x3E
'?': true, // 0x3F
'@': true, // 0x40
'A': true, // 0x41
'B': true, // 0x42
'C': true, // 0x43
'D': true, // 0x44
'E': true, // 0x45
'F': true, // 0x46
'G': true, // 0x47
'H': true, // 0x48
'I': true, // 0x49
'J': true, // 0x4A
'K': true, // 0x4B
'L': true, // 0x4C
'M': true, // 0x4D
'N': true, // 0x4E
'O': true, // 0x4F
'P': true, // 0x50
'Q': true, // 0x51
'R': true, // 0x52
'S': true, // 0x53
'T': true, // 0x54
'U': true, // 0x55
'V': true, // 0x56
'W': true, // 0x57
'X': true, // 0x58
'Y': true, // 0x59
'Z': true, // 0x5A
'[': true, // 0x5B
// 0x5d to 0x7e
']': true, // 0x5D
'^': true, // 0x5E
'_': true, // 0x5F
'`': true, // 0x60
'a': true, // 0x61
'b': true, // 0x62
'c': true, // 0x63
'd': true, // 0x64
'e': true, // 0x65
'f': true, // 0x66
'g': true, // 0x67
'h': true, // 0x68
'i': true, // 0x69
'j': true, // 0x6A
'k': true, // 0x6B
'l': true, // 0x6C
'm': true, // 0x6D
'n': true, // 0x6E
'o': true, // 0x6F
'p': true, // 0x70
'q': true, // 0x71
'r': true, // 0x72
's': true, // 0x73
't': true, // 0x74
'u': true, // 0x75
'v': true, // 0x76
'w': true, // 0x77
'x': true, // 0x78
'y': true, // 0x79
'z': true, // 0x7A
'{': true, // 0x7B
'|': true, // 0x7C
'}': true, // 0x7D
'~': true, // 0x7E
}
func validateValueChar(c int32) bool {
return c >= 0 && c <= int32(utf8.RuneSelf) && safeValueCharset[c]
}
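These allow-lists surface to callers as validation errors on keys, while values are now accepted raw and escaped only on output; a small hedged sketch:

```go
// Assumes: import "fmt" and "go.opentelemetry.io/otel/baggage".
func validationExample() {
	_, err := baggage.NewMemberRaw("bad key", "v") // space is not a legal key character
	fmt.Println(err != nil)                        // true

	_, err = baggage.NewMemberRaw("goodKey", "any value, even with commas") // escaped when rendered
	fmt.Println(err == nil)                                                 // true
}
```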
// valueEscape escapes the string so it can be safely placed inside a baggage value,
// replacing special characters with %XX sequences as needed.
//
// The implementation is based on:
// https://github.com/golang/go/blob/f6509cf5cdbb5787061b784973782933c47f1782/src/net/url/url.go#L285.
func valueEscape(s string) string {
hexCount := 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(c) {
hexCount++
}
}
if hexCount == 0 {
return s
}
var buf [64]byte
var t []byte
required := len(s) + 2*hexCount
if required <= len(buf) {
t = buf[:required]
} else {
t = make([]byte, required)
}
j := 0
for i := 0; i < len(s); i++ {
c := s[i]
if shouldEscape(s[i]) {
const upperhex = "0123456789ABCDEF"
t[j] = '%'
t[j+1] = upperhex[c>>4]
t[j+2] = upperhex[c&15]
j += 3
} else {
t[j] = c
j++
}
}
return string(t)
}
// shouldEscape returns true if the specified byte should be escaped when
// appearing in a baggage value string.
func shouldEscape(c byte) bool {
if c == '%' {
// The percent character must be encoded so that percent-encoding can work.
return true
}
return !validateValueChar(int32(c))
}

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package baggage // import "go.opentelemetry.io/otel/baggage" package baggage // import "go.opentelemetry.io/otel/baggage"

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* /*
Package baggage provides functionality for storing and retrieving Package baggage provides functionality for storing and retrieving

3
vendor/go.opentelemetry.io/otel/codes/README.md generated vendored Normal file
View File

@ -0,0 +1,3 @@
# Codes
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/codes)](https://pkg.go.dev/go.opentelemetry.io/otel/codes)

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package codes // import "go.opentelemetry.io/otel/codes" package codes // import "go.opentelemetry.io/otel/codes"

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* /*
Package codes defines the canonical error codes used by OpenTelemetry. Package codes defines the canonical error codes used by OpenTelemetry.

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* /*
Package otel provides global access to the OpenTelemetry API. The subpackages of Package otel provides global access to the OpenTelemetry API. The subpackages of
@ -22,7 +11,7 @@ transmitted anywhere. An implementation of the OpenTelemetry SDK, like the
default SDK implementation (go.opentelemetry.io/otel/sdk), and associated default SDK implementation (go.opentelemetry.io/otel/sdk), and associated
exporters are used to process and transport this data. exporters are used to process and transport this data.
To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/. To read the getting started guide, see https://opentelemetry.io/docs/languages/go/getting-started/.
To read more about tracing, see go.opentelemetry.io/otel/trace. To read more about tracing, see go.opentelemetry.io/otel/trace.

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otel // import "go.opentelemetry.io/otel" package otel // import "go.opentelemetry.io/otel"

View File

@ -0,0 +1,3 @@
# OTLP Trace Exporter
[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* /*
Package otlptrace contains abstractions for OTLP span exporters. Package otlptrace contains abstractions for OTLP span exporters.

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
@ -104,7 +93,7 @@ func NewUnstarted(client Client) *Exporter {
} }
} }
// MarshalLog is the marshaling function used by the logging system to represent this exporter. // MarshalLog is the marshaling function used by the logging system to represent this Exporter.
func (e *Exporter) MarshalLog() interface{} { func (e *Exporter) MarshalLog() interface{} {
return struct { return struct {
Type string Type string

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"

View File

@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"


@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"


@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
@ -121,6 +110,7 @@ func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
if psid := sd.Parent().SpanID(); psid.IsValid() { if psid := sd.Parent().SpanID(); psid.IsValid() {
s.ParentSpanId = psid[:] s.ParentSpanId = psid[:]
} }
s.Flags = buildSpanFlags(sd.Parent())
return s return s
} }
@ -157,16 +147,28 @@ func links(links []tracesdk.Link) []*tracepb.Span_Link {
tid := otLink.SpanContext.TraceID() tid := otLink.SpanContext.TraceID()
sid := otLink.SpanContext.SpanID() sid := otLink.SpanContext.SpanID()
flags := buildSpanFlags(otLink.SpanContext)
sl = append(sl, &tracepb.Span_Link{ sl = append(sl, &tracepb.Span_Link{
TraceId: tid[:], TraceId: tid[:],
SpanId: sid[:], SpanId: sid[:],
Attributes: KeyValues(otLink.Attributes), Attributes: KeyValues(otLink.Attributes),
DroppedAttributesCount: uint32(otLink.DroppedAttributeCount), DroppedAttributesCount: uint32(otLink.DroppedAttributeCount),
Flags: flags,
}) })
} }
return sl return sl
} }
func buildSpanFlags(sc trace.SpanContext) uint32 {
flags := tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_HAS_IS_REMOTE_MASK
if sc.IsRemote() {
flags |= tracepb.SpanFlags_SPAN_FLAGS_CONTEXT_IS_REMOTE_MASK
}
return uint32(flags)
}
// spanEvents transforms span Events to an OTLP span events. // spanEvents transforms span Events to an OTLP span events.
func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event { func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
if len(es) == 0 { if len(es) == 0 {
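
The hunk above threads the new buildSpanFlags helper through both span and link conversion, so the OTLP payload records whether the parent or linked span context was remote. Below is a self-contained sketch of that logic; the local constants are assumed to mirror the tracepb SpanFlags masks so the example does not need the generated proto package.

// Sketch of the span-flags computation added above, using assumed mask
// values (bit 8: "is remote" is known; bit 9: the context was remote).
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

const (
	hasIsRemoteMask uint32 = 0x100 // the "is remote" bit carries meaning
	isRemoteMask    uint32 = 0x200 // the context came from a remote parent
)

func buildSpanFlags(sc trace.SpanContext) uint32 {
	flags := hasIsRemoteMask
	if sc.IsRemote() {
		flags |= isRemoteMask
	}
	return flags
}

func main() {
	local := trace.SpanContext{}
	remote := trace.SpanContext{}.WithRemote(true)

	fmt.Printf("local parent:  0x%x\n", buildSpanFlags(local))  // 0x100
	fmt.Printf("remote parent: 0x%x\n", buildSpanFlags(remote)) // 0x300
}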


@ -1,20 +1,9 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace"
// Version is the current release version of the OpenTelemetry OTLP trace exporter in use. // Version is the current release version of the OpenTelemetry OTLP trace exporter in use.
func Version() string { func Version() string {
return "1.21.0" return "1.26.0"
} }


@ -1,18 +1,7 @@
#!/usr/bin/env bash #!/usr/bin/env bash
# Copyright The OpenTelemetry Authors # Copyright The OpenTelemetry Authors
# # SPDX-License-Identifier: Apache-2.0
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -euo pipefail set -euo pipefail


@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package otel // import "go.opentelemetry.io/otel" package otel // import "go.opentelemetry.io/otel"
@ -18,12 +7,8 @@ import (
"go.opentelemetry.io/otel/internal/global" "go.opentelemetry.io/otel/internal/global"
) )
var ( // Compile-time check global.ErrDelegator implements ErrorHandler.
// Compile-time check global.ErrDelegator implements ErrorHandler. var _ ErrorHandler = (*global.ErrDelegator)(nil)
_ ErrorHandler = (*global.ErrDelegator)(nil)
// Compile-time check global.ErrLogger implements ErrorHandler.
_ ErrorHandler = (*global.ErrLogger)(nil)
)
// GetErrorHandler returns the global ErrorHandler instance. // GetErrorHandler returns the global ErrorHandler instance.
// //
@ -44,5 +29,5 @@ func GetErrorHandler() ErrorHandler { return global.GetErrorHandler() }
// delegate errors to h. // delegate errors to h.
func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) } func SetErrorHandler(h ErrorHandler) { global.SetErrorHandler(h) }
// Handle is a convenience function for ErrorHandler().Handle(err). // Handle is a convenience function for GetErrorHandler().Handle(err).
func Handle(err error) { global.Handle(err) } func Handle(err error) { global.GetErrorHandler().Handle(err) }
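
The otel package now forwards Handle straight to GetErrorHandler().Handle. A short example of the public API involved follows; the errCounter type is hypothetical and only illustrates installing a custom handler.

// Sketch: register a custom ErrorHandler, then report an error through
// otel.Handle, which delegates to the registered handler.
package main

import (
	"errors"
	"log"
	"sync/atomic"

	"go.opentelemetry.io/otel"
)

// errCounter is a hypothetical handler that counts and logs reported errors.
type errCounter struct {
	n atomic.Int64
}

func (c *errCounter) Handle(err error) {
	log.Printf("otel error #%d: %v", c.n.Add(1), err)
}

func main() {
	otel.SetErrorHandler(&errCounter{}) // subsequent otel.Handle calls delegate here
	otel.Handle(errors.New("exporter unreachable"))
}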


@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* /*
Package attribute provide several helper functions for some commonly used Package attribute provide several helper functions for some commonly used


@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/* /*
Package baggage provides base types and functionality to store and retrieve Package baggage provides base types and functionality to store and retrieve


@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package baggage // import "go.opentelemetry.io/otel/internal/baggage" package baggage // import "go.opentelemetry.io/otel/internal/baggage"


@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal // import "go.opentelemetry.io/otel/internal" package internal // import "go.opentelemetry.io/otel/internal"


@ -1,38 +1,13 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/internal/global" package global // import "go.opentelemetry.io/otel/internal/global"
import ( import (
"log" "log"
"os"
"sync/atomic" "sync/atomic"
) )
var (
// GlobalErrorHandler provides an ErrorHandler that can be used
// throughout an OpenTelemetry instrumented project. When a user
// specified ErrorHandler is registered (`SetErrorHandler`) all calls to
// `Handle` and will be delegated to the registered ErrorHandler.
GlobalErrorHandler = defaultErrorHandler()
// Compile-time check that delegator implements ErrorHandler.
_ ErrorHandler = (*ErrDelegator)(nil)
// Compile-time check that errLogger implements ErrorHandler.
_ ErrorHandler = (*ErrLogger)(nil)
)
// ErrorHandler handles irremediable events. // ErrorHandler handles irremediable events.
type ErrorHandler interface { type ErrorHandler interface {
// Handle handles any error deemed irremediable by an OpenTelemetry // Handle handles any error deemed irremediable by an OpenTelemetry
@ -44,59 +19,18 @@ type ErrDelegator struct {
delegate atomic.Pointer[ErrorHandler] delegate atomic.Pointer[ErrorHandler]
} }
func (d *ErrDelegator) Handle(err error) { // Compile-time check that delegator implements ErrorHandler.
d.getDelegate().Handle(err) var _ ErrorHandler = (*ErrDelegator)(nil)
}
func (d *ErrDelegator) getDelegate() ErrorHandler { func (d *ErrDelegator) Handle(err error) {
return *d.delegate.Load() if eh := d.delegate.Load(); eh != nil {
(*eh).Handle(err)
return
}
log.Print(err)
} }
// setDelegate sets the ErrorHandler delegate. // setDelegate sets the ErrorHandler delegate.
func (d *ErrDelegator) setDelegate(eh ErrorHandler) { func (d *ErrDelegator) setDelegate(eh ErrorHandler) {
d.delegate.Store(&eh) d.delegate.Store(&eh)
} }
func defaultErrorHandler() *ErrDelegator {
d := &ErrDelegator{}
d.setDelegate(&ErrLogger{l: log.New(os.Stderr, "", log.LstdFlags)})
return d
}
// ErrLogger logs errors if no delegate is set, otherwise they are delegated.
type ErrLogger struct {
l *log.Logger
}
// Handle logs err if no delegate is set, otherwise it is delegated.
func (h *ErrLogger) Handle(err error) {
h.l.Print(err)
}
// GetErrorHandler returns the global ErrorHandler instance.
//
// The default ErrorHandler instance returned will log all errors to STDERR
// until an override ErrorHandler is set with SetErrorHandler. All
// ErrorHandler returned prior to this will automatically forward errors to
// the set instance instead of logging.
//
// Subsequent calls to SetErrorHandler after the first will not forward errors
// to the new ErrorHandler for prior returned instances.
func GetErrorHandler() ErrorHandler {
return GlobalErrorHandler
}
// SetErrorHandler sets the global ErrorHandler to h.
//
// The first time this is called all ErrorHandler previously returned from
// GetErrorHandler will send errors to h instead of the default logging
// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
// delegate errors to h.
func SetErrorHandler(h ErrorHandler) {
GlobalErrorHandler.setDelegate(h)
}
// Handle is a convenience function for ErrorHandler().Handle(err).
func Handle(err error) {
GetErrorHandler().Handle(err)
}
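
The rewritten handler drops the separate ErrLogger: ErrDelegator itself falls back to log.Print when no delegate has been registered. Below is a standalone sketch of that atomic delegate-with-fallback pattern, with illustrative names that are not part of the otel API.

// Sketch: an atomic.Pointer holds the optional handler; a nil load falls
// back to the standard library logger, exactly the shape of the new
// ErrDelegator.Handle above.
package main

import (
	"errors"
	"fmt"
	"log"
	"sync/atomic"
)

type handler interface{ Handle(error) }

type delegator struct {
	delegate atomic.Pointer[handler]
}

func (d *delegator) Handle(err error) {
	if h := d.delegate.Load(); h != nil {
		(*h).Handle(err) // delegate once one has been registered
		return
	}
	log.Print(err) // fallback: no delegate yet
}

func (d *delegator) setDelegate(h handler) {
	d.delegate.Store(&h)
}

type printer struct{}

func (printer) Handle(err error) { fmt.Println("custom:", err) }

func main() {
	var d delegator
	d.Handle(errors.New("before delegate")) // goes to log.Print
	d.setDelegate(printer{})
	d.Handle(errors.New("after delegate")) // goes to printer
}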


@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/internal/global" package global // import "go.opentelemetry.io/otel/internal/global"


@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/internal/global" package global // import "go.opentelemetry.io/otel/internal/global"
@ -23,17 +12,20 @@ import (
"github.com/go-logr/stdr" "github.com/go-logr/stdr"
) )
// globalLogger is the logging interface used within the otel api and sdk provide details of the internals. // globalLogger holds a reference to the [logr.Logger] used within
// go.opentelemetry.io/otel.
// //
// The default logger uses stdr which is backed by the standard `log.Logger` // The default logger uses stdr which is backed by the standard `log.Logger`
// interface. This logger will only show messages at the Error Level. // interface. This logger will only show messages at the Error Level.
var globalLogger atomic.Pointer[logr.Logger] var globalLogger = func() *atomic.Pointer[logr.Logger] {
l := stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
func init() { p := new(atomic.Pointer[logr.Logger])
SetLogger(stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))) p.Store(&l)
} return p
}()
// SetLogger overrides the globalLogger with l. // SetLogger sets the global Logger to l.
// //
// To see Warn messages use a logger with `l.V(1).Enabled() == true` // To see Warn messages use a logger with `l.V(1).Enabled() == true`
// To see Info messages use a logger with `l.V(4).Enabled() == true` // To see Info messages use a logger with `l.V(4).Enabled() == true`
@ -42,28 +34,29 @@ func SetLogger(l logr.Logger) {
globalLogger.Store(&l) globalLogger.Store(&l)
} }
func getLogger() logr.Logger { // GetLogger returns the global logger.
func GetLogger() logr.Logger {
return *globalLogger.Load() return *globalLogger.Load()
} }
// Info prints messages about the general state of the API or SDK. // Info prints messages about the general state of the API or SDK.
// This should usually be less than 5 messages a minute. // This should usually be less than 5 messages a minute.
func Info(msg string, keysAndValues ...interface{}) { func Info(msg string, keysAndValues ...interface{}) {
getLogger().V(4).Info(msg, keysAndValues...) GetLogger().V(4).Info(msg, keysAndValues...)
} }
// Error prints messages about exceptional states of the API or SDK. // Error prints messages about exceptional states of the API or SDK.
func Error(err error, msg string, keysAndValues ...interface{}) { func Error(err error, msg string, keysAndValues ...interface{}) {
getLogger().Error(err, msg, keysAndValues...) GetLogger().Error(err, msg, keysAndValues...)
} }
// Debug prints messages about all internal changes in the API or SDK. // Debug prints messages about all internal changes in the API or SDK.
func Debug(msg string, keysAndValues ...interface{}) { func Debug(msg string, keysAndValues ...interface{}) {
getLogger().V(8).Info(msg, keysAndValues...) GetLogger().V(8).Info(msg, keysAndValues...)
} }
// Warn prints messages about warnings in the API or SDK. // Warn prints messages about warnings in the API or SDK.
// Not an error but is likely more important than an informational event. // Not an error but is likely more important than an informational event.
func Warn(msg string, keysAndValues ...interface{}) { func Warn(msg string, keysAndValues ...interface{}) {
getLogger().V(1).Info(msg, keysAndValues...) GetLogger().V(1).Info(msg, keysAndValues...)
} }
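
With the global logger now initialized eagerly and exposed via GetLogger, the verbosity levels described in the comments (V(1) Warn, V(4) Info, V(8) Debug) decide which messages are emitted. A hedged sketch of raising that verbosity through the public otel.SetLogger API and the stdr backend; note that stdr.SetVerbosity is a process-wide setting of the stdr package, not of otel itself.

// Sketch: install an stdr-backed logger with enough verbosity to see
// otel Warn and Info messages, but not Debug.
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
	"go.opentelemetry.io/otel"
)

func main() {
	stdr.SetVerbosity(4) // V(1)=Warn and V(4)=Info pass, V(8)=Debug does not

	logger := stdr.New(log.New(os.Stderr, "otel: ", log.LstdFlags|log.Lshortfile))
	otel.SetLogger(logger)
}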


@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/internal/global" package global // import "go.opentelemetry.io/otel/internal/global"
@ -130,9 +119,11 @@ func (m *meter) setDelegate(provider metric.MeterProvider) {
inst.setDelegate(meter) inst.setDelegate(meter)
} }
for e := m.registry.Front(); e != nil; e = e.Next() { var n *list.Element
for e := m.registry.Front(); e != nil; e = n {
r := e.Value.(*registration) r := e.Value.(*registration)
r.setDelegate(meter) r.setDelegate(meter)
n = e.Next()
m.registry.Remove(e) m.registry.Remove(e)
} }
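
The loop in this hunk now captures the next element before removing the current one: container/list clears an element's links on Remove, so the old `e = e.Next()` post-statement would return nil and stop after the first removal. A standalone illustration of the pattern (not otel code):

// Sketch: safely drain a container/list by grabbing the successor while the
// current element is still linked in, then removing it.
package main

import (
	"container/list"
	"fmt"
)

func main() {
	l := list.New()
	for _, v := range []string{"a", "b", "c"} {
		l.PushBack(v)
	}

	var n *list.Element
	for e := l.Front(); e != nil; e = n {
		n = e.Next() // capture the successor before Remove unlinks e
		fmt.Println(e.Value)
		l.Remove(e)
	}

	fmt.Println("remaining:", l.Len()) // remaining: 0
}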


@ -1,16 +1,5 @@
// Copyright The OpenTelemetry Authors // Copyright The OpenTelemetry Authors
// // SPDX-License-Identifier: Apache-2.0
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package global // import "go.opentelemetry.io/otel/internal/global" package global // import "go.opentelemetry.io/otel/internal/global"

Some files were not shown because too many files have changed in this diff.