fix: Update go-sentry and go-oidc to address CVE's

This commit is contained in:
João "Pisco" Fernandes 2026-03-03 18:02:26 +00:00
parent 29b3a7aa7e
commit c0bc3bdbf0
72 changed files with 8261 additions and 1421 deletions

6
go.mod
View File

@ -3,15 +3,15 @@ module github.com/cloudflare/cloudflared
go 1.24.0
require (
github.com/coreos/go-oidc/v3 v3.10.0
github.com/coreos/go-oidc/v3 v3.17.0
github.com/coreos/go-systemd/v22 v22.5.0
github.com/facebookgo/grace v0.0.0-20180706040059-75cf19382434
github.com/fortytw2/leaktest v1.3.0
github.com/fsnotify/fsnotify v1.4.9
github.com/getsentry/sentry-go v0.16.0
github.com/getsentry/sentry-go v0.43.0
github.com/go-chi/chi/v5 v5.2.2
github.com/go-chi/cors v1.2.1
github.com/go-jose/go-jose/v4 v4.1.0
github.com/go-jose/go-jose/v4 v4.1.3
github.com/gobwas/ws v1.2.1
github.com/google/gopacket v1.1.19
github.com/google/uuid v1.6.0

12
go.sum
View File

@ -15,8 +15,8 @@ github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/coreos/go-oidc/v3 v3.10.0 h1:tDnXHnLyiTVyT/2zLDGj09pFPkhND8Gl8lnTRhoEaJU=
github.com/coreos/go-oidc/v3 v3.10.0/go.mod h1:5j11xcw0D3+SGxn6Z/WFADsgcWVMyNAlSQupk0KK3ac=
github.com/coreos/go-oidc/v3 v3.17.0 h1:hWBGaQfbi0iVviX4ibC7bk8OKT5qNr4klBaCHVNvehc=
github.com/coreos/go-oidc/v3 v3.17.0/go.mod h1:wqPbKFrVnE90vty060SB40FCJ8fTHTxSwyXJqZH+sI8=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd/v22 v22.5.0 h1:RrqgGjYQKalulkV8NGVIfkXQf6YYmOyiJKk8iXXhfZs=
github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
@ -43,8 +43,8 @@ github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/getsentry/sentry-go v0.16.0 h1:owk+S+5XcgJLlGR/3+3s6N4d+uKwqYvh/eS0AIMjPWo=
github.com/getsentry/sentry-go v0.16.0/go.mod h1:ZXCloQLj0pG7mja5NK6NPf2V4A88YJ4pNlc2mOHwh6Y=
github.com/getsentry/sentry-go v0.43.0 h1:XbXLpFicpo8HmBDaInk7dum18G9KSLcjZiyUKS+hLW4=
github.com/getsentry/sentry-go v0.43.0/go.mod h1:XDotiNZbgf5U8bPDUAfvcFmOnMQQceESxyKaObSssW0=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
@ -56,8 +56,8 @@ github.com/go-chi/cors v1.2.1 h1:xEC8UT3Rlp2QuWNEr4Fs/c2EAGVKBwy/1vHx3bppil4=
github.com/go-chi/cors v1.2.1/go.mod h1:sSbTewc+6wYHBBCW7ytsFSn836hqM7JxpglAy2Vzc58=
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og=
github.com/go-jose/go-jose/v4 v4.1.0 h1:cYSYxd3pw5zd2FSXk2vGdn9igQU2PS8MuxrCOCl0FdY=
github.com/go-jose/go-jose/v4 v4.1.0/go.mod h1:GG/vqmYm3Von2nYiB2vGTXzdoNKE5tix5tuc6iAd+sw=
github.com/go-jose/go-jose/v4 v4.1.3 h1:CVLmWDhDVRa6Mi/IgCgaopNosCaHz7zrMeF9MlZRkrs=
github.com/go-jose/go-jose/v4 v4.1.3/go.mod h1:x4oUasVrzR7071A4TnHLGSPpNOm2a21K9Kf04k1rs08=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=

View File

@ -11,7 +11,6 @@ import (
"io"
"net/http"
"sync"
"time"
jose "github.com/go-jose/go-jose/v4"
)
@ -57,22 +56,29 @@ func (s *StaticKeySet) VerifySignature(ctx context.Context, jwt string) ([]byte,
// The returned KeySet is a long lived verifier that caches keys based on any
// keys change. Reuse a common remote key set instead of creating new ones as needed.
func NewRemoteKeySet(ctx context.Context, jwksURL string) *RemoteKeySet {
return newRemoteKeySet(ctx, jwksURL, time.Now)
return newRemoteKeySet(ctx, jwksURL)
}
func newRemoteKeySet(ctx context.Context, jwksURL string, now func() time.Time) *RemoteKeySet {
if now == nil {
now = time.Now
func newRemoteKeySet(ctx context.Context, jwksURL string) *RemoteKeySet {
return &RemoteKeySet{
jwksURL: jwksURL,
// For historical reasons, this package uses contexts for configuration, not just
// cancellation. In hindsight, this was a bad idea.
//
// Attempts to reason about how cancels should work with background requests have
// largely led to confusion. Use the context here as a config bag-of-values and
// ignore the cancel function.
ctx: context.WithoutCancel(ctx),
}
return &RemoteKeySet{jwksURL: jwksURL, ctx: ctx, now: now}
}
// RemoteKeySet is a KeySet implementation that validates JSON web tokens against
// a jwks_uri endpoint.
type RemoteKeySet struct {
jwksURL string
ctx context.Context
now func() time.Time
// Used for configuration. Cancelation is ignored.
ctx context.Context
// guard all other fields
mu sync.RWMutex

View File

@ -154,40 +154,65 @@ var supportedAlgorithms = map[string]bool{
EdDSA: true,
}
// ProviderConfig allows creating providers when discovery isn't supported. It's
// generally easier to use NewProvider directly.
// ProviderConfig allows direct creation of a [Provider] from metadata
// configuration. This is intended for interop with providers that don't support
// discovery, or host the JSON discovery document at an off-spec path.
//
// The ProviderConfig struct specifies JSON struct tags to support document
// parsing.
//
// // Directly fetch the metadata document.
// resp, err := http.Get("https://login.example.com/custom-metadata-path")
// if err != nil {
// // ...
// }
// defer resp.Body.Close()
//
// // Parse config from JSON metadata.
// config := &oidc.ProviderConfig{}
// if err := json.NewDecoder(resp.Body).Decode(config); err != nil {
// // ...
// }
// p := config.NewProvider(context.Background())
//
// For providers that implement discovery, use [NewProvider] instead.
//
// See: https://openid.net/specs/openid-connect-discovery-1_0.html
type ProviderConfig struct {
// IssuerURL is the identity of the provider, and the string it uses to sign
// ID tokens with. For example "https://accounts.google.com". This value MUST
// match ID tokens exactly.
IssuerURL string
IssuerURL string `json:"issuer"`
// AuthURL is the endpoint used by the provider to support the OAuth 2.0
// authorization endpoint.
AuthURL string
AuthURL string `json:"authorization_endpoint"`
// TokenURL is the endpoint used by the provider to support the OAuth 2.0
// token endpoint.
TokenURL string
TokenURL string `json:"token_endpoint"`
// DeviceAuthURL is the endpoint used by the provider to support the OAuth 2.0
// device authorization endpoint.
DeviceAuthURL string
DeviceAuthURL string `json:"device_authorization_endpoint"`
// UserInfoURL is the endpoint used by the provider to support the OpenID
// Connect UserInfo flow.
//
// https://openid.net/specs/openid-connect-core-1_0.html#UserInfo
UserInfoURL string
UserInfoURL string `json:"userinfo_endpoint"`
// JWKSURL is the endpoint used by the provider to advertise public keys to
// verify issued ID tokens. This endpoint is polled as new keys are made
// available.
JWKSURL string
JWKSURL string `json:"jwks_uri"`
// Algorithms, if provided, indicate a list of JWT algorithms allowed to sign
// ID tokens. If not provided, this defaults to the algorithms advertised by
// the JWK endpoint, then the set of algorithms supported by this package.
Algorithms []string
Algorithms []string `json:"id_token_signing_alg_values_supported"`
}
// NewProvider initializes a provider from a set of endpoints, rather than
// through discovery.
//
// The provided context is only used for [http.Client] configuration through
// [ClientContext], not cancelation.
func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
return &Provider{
issuer: p.IssuerURL,
@ -202,9 +227,14 @@ func (p *ProviderConfig) NewProvider(ctx context.Context) *Provider {
}
// NewProvider uses the OpenID Connect discovery mechanism to construct a Provider.
//
// The issuer is the URL identifier for the service. For example: "https://accounts.google.com"
// or "https://login.salesforce.com".
//
// OpenID Connect providers that don't implement discovery or host the discovery
// document at a non-spec compliant path (such as requiring a URL parameter),
// should use [ProviderConfig] instead.
//
// See: https://openid.net/specs/openid-connect-discovery-1_0.html
func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
wellKnown := strings.TrimSuffix(issuer, "/") + "/.well-known/openid-configuration"
req, err := http.NewRequest("GET", wellKnown, nil)
@ -237,7 +267,7 @@ func NewProvider(ctx context.Context, issuer string) (*Provider, error) {
issuerURL = issuer
}
if p.Issuer != issuerURL && !skipIssuerValidation {
return nil, fmt.Errorf("oidc: issuer did not match the issuer returned by provider, expected %q got %q", issuer, p.Issuer)
return nil, fmt.Errorf("oidc: issuer URL provided to client (%q) did not match the issuer URL returned by provider (%q)", issuer, p.Issuer)
}
var algs []string
for _, a := range p.Algorithms {

View File

@ -1,15 +1,11 @@
package oidc
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"errors"
"fmt"
"io"
"net/http"
"strings"
"time"
jose "github.com/go-jose/go-jose/v4"
@ -120,8 +116,8 @@ type Config struct {
}
// VerifierContext returns an IDTokenVerifier that uses the provider's key set to
// verify JWTs. As opposed to Verifier, the context is used for all requests to
// the upstream JWKs endpoint.
// verify JWTs. As opposed to Verifier, the context is used to configure requests
// to the upstream JWKs endpoint. The provided context's cancellation is ignored.
func (p *Provider) VerifierContext(ctx context.Context, config *Config) *IDTokenVerifier {
return p.newVerifier(NewRemoteKeySet(ctx, p.jwksURL), config)
}
@ -145,18 +141,6 @@ func (p *Provider) newVerifier(keySet KeySet, config *Config) *IDTokenVerifier {
return NewVerifier(p.issuer, keySet, config)
}
func parseJWT(p string) ([]byte, error) {
parts := strings.Split(p, ".")
if len(parts) < 2 {
return nil, fmt.Errorf("oidc: malformed jwt, expected 3 parts got %d", len(parts))
}
payload, err := base64.RawURLEncoding.DecodeString(parts[1])
if err != nil {
return nil, fmt.Errorf("oidc: malformed jwt payload: %v", err)
}
return payload, nil
}
func contains(sli []string, ele string) bool {
for _, s := range sli {
if s == ele {
@ -219,12 +203,49 @@ func resolveDistributedClaim(ctx context.Context, verifier *IDTokenVerifier, src
//
// token, err := verifier.Verify(ctx, rawIDToken)
func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDToken, error) {
// Throw out tokens with invalid claims before trying to verify the token. This lets
// us do cheap checks before possibly re-syncing keys.
payload, err := parseJWT(rawIDToken)
var supportedSigAlgs []jose.SignatureAlgorithm
for _, alg := range v.config.SupportedSigningAlgs {
supportedSigAlgs = append(supportedSigAlgs, jose.SignatureAlgorithm(alg))
}
if len(supportedSigAlgs) == 0 {
// If no algorithms were specified by both the config and discovery, default
// to the one mandatory algorithm "RS256".
supportedSigAlgs = []jose.SignatureAlgorithm{jose.RS256}
}
if v.config.InsecureSkipSignatureCheck {
// "none" is a required value to even parse a JWT with the "none" algorithm
// using go-jose.
supportedSigAlgs = append(supportedSigAlgs, "none")
}
// Parse and verify the signature first. This at least forces the user to have
// a valid, signed ID token before we do any other processing.
jws, err := jose.ParseSigned(rawIDToken, supportedSigAlgs)
if err != nil {
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
}
switch len(jws.Signatures) {
case 0:
return nil, fmt.Errorf("oidc: id token not signed")
case 1:
default:
return nil, fmt.Errorf("oidc: multiple signatures on id token not supported")
}
sig := jws.Signatures[0]
var payload []byte
if v.config.InsecureSkipSignatureCheck {
// Yolo mode.
payload = jws.UnsafePayloadWithoutVerification()
} else {
// The JWT is attached here for the happy path to avoid the verifier from
// having to parse the JWT twice.
ctx = context.WithValue(ctx, parsedJWTKey, jws)
payload, err = v.keySet.VerifySignature(ctx, rawIDToken)
if err != nil {
return nil, fmt.Errorf("failed to verify signature: %v", err)
}
}
var token idToken
if err := json.Unmarshal(payload, &token); err != nil {
return nil, fmt.Errorf("oidc: failed to unmarshal claims: %v", err)
@ -254,6 +275,7 @@ func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDTok
AccessTokenHash: token.AtHash,
claims: payload,
distributedClaims: distributedClaims,
sigAlgorithm: sig.Header.Algorithm,
}
// Check issuer.
@ -306,45 +328,6 @@ func (v *IDTokenVerifier) Verify(ctx context.Context, rawIDToken string) (*IDTok
}
}
if v.config.InsecureSkipSignatureCheck {
return t, nil
}
var supportedSigAlgs []jose.SignatureAlgorithm
for _, alg := range v.config.SupportedSigningAlgs {
supportedSigAlgs = append(supportedSigAlgs, jose.SignatureAlgorithm(alg))
}
if len(supportedSigAlgs) == 0 {
// If no algorithms were specified by both the config and discovery, default
// to the one mandatory algorithm "RS256".
supportedSigAlgs = []jose.SignatureAlgorithm{jose.RS256}
}
jws, err := jose.ParseSigned(rawIDToken, supportedSigAlgs)
if err != nil {
return nil, fmt.Errorf("oidc: malformed jwt: %v", err)
}
switch len(jws.Signatures) {
case 0:
return nil, fmt.Errorf("oidc: id token not signed")
case 1:
default:
return nil, fmt.Errorf("oidc: multiple signatures on id token not supported")
}
sig := jws.Signatures[0]
t.sigAlgorithm = sig.Header.Algorithm
ctx = context.WithValue(ctx, parsedJWTKey, jws)
gotPayload, err := v.keySet.VerifySignature(ctx, rawIDToken)
if err != nil {
return nil, fmt.Errorf("failed to verify signature: %v", err)
}
// Ensure that the payload returned by the KeySet actually matches the payload parsed earlier.
if !bytes.Equal(gotPayload, payload) {
return nil, errors.New("oidc: internal error, payload parsed did not match previous payload")
}
return t, nil
}

19
vendor/github.com/getsentry/sentry-go/.codecov.yml generated vendored Normal file
View File

@ -0,0 +1,19 @@
codecov:
# across
notify:
# Do not notify until at least this number of reports have been uploaded
# from the CI pipeline. We normally have more than that number, but 6
# should be enough to get a first notification.
after_n_builds: 6
coverage:
status:
project:
default:
# Do not fail the commit status if the coverage was reduced up to this value
threshold: 0.5%
patch:
default:
informational: true
ignore:
- "log_fallback.go"
- "internal/testutils"

View File

@ -1,11 +1,46 @@
minVersion: 0.23.1
changelogPolicy: simple
minVersion: 2.14.0
changelog:
policy: auto
versioning:
policy: auto
artifactProvider:
name: none
targets:
- name: github
includeNames: /none/
tagPrefix: v
- name: github
tagPrefix: otel/v
tagOnly: true
- name: github
tagPrefix: echo/v
tagOnly: true
- name: github
tagPrefix: fasthttp/v
tagOnly: true
- name: github
tagPrefix: fiber/v
tagOnly: true
- name: github
tagPrefix: gin/v
tagOnly: true
- name: github
tagPrefix: iris/v
tagOnly: true
- name: github
tagPrefix: negroni/v
tagOnly: true
- name: github
tagPrefix: logrus/v
tagOnly: true
- name: github
tagPrefix: slog/v
tagOnly: true
- name: github
tagPrefix: zerolog/v
tagOnly: true
- name: github
tagPrefix: zap/v
tagOnly: true
- name: registry
sdks:
github:getsentry/sentry-go:

View File

@ -1,4 +1,8 @@
# Code coverage artifacts
coverage.txt
coverage.out
coverage.html
.coverage/
# Just my personal way of tracking stuff — Kamil
FIXME.md
@ -8,3 +12,6 @@ TODO.md
# IDE system files
.idea
.vscode
# Local Claude Code settings that should not be committed
.claude/settings.local.json

View File

@ -1,22 +1,17 @@
version: "2"
linters:
disable-all: true
default: none
enable:
- bodyclose
- deadcode
- depguard
- dogsled
- dupl
- errcheck
- exportloopref
- gochecknoinits
- goconst
- gocritic
- gocyclo
- godot
- gofmt
- goimports
- gosec
- gosimple
- govet
- ineffassign
- misspell
@ -24,26 +19,44 @@ linters:
- prealloc
- revive
- staticcheck
- structcheck
- typecheck
- unconvert
- unparam
- unused
- varcheck
- whitespace
issues:
exclude-rules:
- path: _test\.go
linters:
- prealloc
- path: _test\.go
text: "G306:"
linters:
- gosec
- path: errors_test\.go
linters:
- unused
- path: http/example_test\.go
linters:
- errcheck
- bodyclose
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
rules:
- linters:
- goconst
- prealloc
path: _test\.go
- linters:
- gosec
path: _test\.go
text: 'G306:'
- linters:
- unused
path: errors_test\.go
- linters:
- bodyclose
- errcheck
path: http/example_test\.go
paths:
- third_party$
- builtin$
- examples$
formatters:
enable:
- gofmt
- goimports
exclusions:
generated: lax
paths:
- third_party$
- builtin$
- examples$

File diff suppressed because it is too large Load Diff

View File

@ -72,6 +72,8 @@ $ go test -race -coverprofile=coverage.txt -covermode=atomic && go tool cover -h
## Linting
Lint with [`golangci-lint`](https://github.com/golangci/golangci-lint):
```console
$ golangci-lint run
```

View File

@ -1,9 +1,21 @@
Copyright (c) 2019 Sentry (https://sentry.io) and individual contributors.
All rights reserved.
MIT License
Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
Copyright (c) 2019 Functional Software, Inc. dba Sentry
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

93
vendor/github.com/getsentry/sentry-go/Makefile generated vendored Normal file
View File

@ -0,0 +1,93 @@
.DEFAULT_GOAL := help
MKFILE_PATH := $(abspath $(lastword $(MAKEFILE_LIST)))
MKFILE_DIR := $(dir $(MKFILE_PATH))
ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort)
GO = go
TIMEOUT = 300
# Parse Makefile and display the help
help: ## Show help
@grep -E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | sort | awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'
.PHONY: help
build: ## Build everything
for dir in $(ALL_GO_MOD_DIRS); do \
cd "$${dir}"; \
echo ">>> Running 'go build' for module: $${dir}"; \
go build ./...; \
done;
.PHONY: build
### Tests (inspired by https://github.com/open-telemetry/opentelemetry-go/blob/main/Makefile)
TEST_TARGETS := test-short test-verbose test-race
test-race: ARGS=-race
test-short: ARGS=-short
test-verbose: ARGS=-v -race
$(TEST_TARGETS): test
test: $(ALL_GO_MOD_DIRS:%=test/%) ## Run tests
test/%: DIR=$*
test/%:
@echo ">>> Running tests for module: $(DIR)"
@# We use '-count=1' to disable test caching.
(cd $(DIR) && $(GO) test -count=1 -timeout $(TIMEOUT)s $(ARGS) ./...)
.PHONY: $(TEST_TARGETS) test
# Coverage
COVERAGE_MODE = atomic
COVERAGE_PROFILE = coverage.out
COVERAGE_REPORT_DIR = .coverage
COVERAGE_REPORT_DIR_ABS = "$(MKFILE_DIR)/$(COVERAGE_REPORT_DIR)"
$(COVERAGE_REPORT_DIR):
mkdir -p $(COVERAGE_REPORT_DIR)
clean-report-dir: $(COVERAGE_REPORT_DIR)
test $(COVERAGE_REPORT_DIR) && rm -f $(COVERAGE_REPORT_DIR)/*
test-coverage: $(COVERAGE_REPORT_DIR) clean-report-dir ## Test with coverage enabled
set -e ; \
for dir in $(ALL_GO_MOD_DIRS); do \
echo ">>> Running tests with coverage for module: $${dir}"; \
DIR_ABS=$$(python -c 'import os, sys; print(os.path.realpath(sys.argv[1]))' $${dir}) ; \
REPORT_NAME=$$(basename $${DIR_ABS}); \
(cd "$${dir}" && \
$(GO) test -count=1 -timeout $(TIMEOUT)s -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" ./... && \
cp $(COVERAGE_PROFILE) "$(COVERAGE_REPORT_DIR_ABS)/$${REPORT_NAME}_$(COVERAGE_PROFILE)" && \
$(GO) tool cover -html=$(COVERAGE_PROFILE) -o coverage.html); \
done;
.PHONY: test-coverage clean-report-dir
test-race-coverage: $(COVERAGE_REPORT_DIR) clean-report-dir ## Run tests with race detection and coverage
set -e ; \
for dir in $(ALL_GO_MOD_DIRS); do \
echo ">>> Running tests with race detection and coverage for module: $${dir}"; \
DIR_ABS=$$(python -c 'import os, sys; print(os.path.realpath(sys.argv[1]))' $${dir}) ; \
REPORT_NAME=$$(basename $${DIR_ABS}); \
(cd "$${dir}" && \
$(GO) test -count=1 -timeout $(TIMEOUT)s -race -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" ./... && \
cp $(COVERAGE_PROFILE) "$(COVERAGE_REPORT_DIR_ABS)/$${REPORT_NAME}_$(COVERAGE_PROFILE)" && \
$(GO) tool cover -html=$(COVERAGE_PROFILE) -o coverage.html); \
done;
.PHONY: test-race-coverage
mod-tidy: ## Check go.mod tidiness
set -e ; \
for dir in $(ALL_GO_MOD_DIRS); do \
echo ">>> Running 'go mod tidy' for module: $${dir}"; \
(cd "$${dir}" && GOTOOLCHAIN=local go mod tidy -go=1.24.0 -compat=1.24.0); \
done; \
git diff --exit-code;
.PHONY: mod-tidy
vet: ## Run "go vet"
set -e ; \
for dir in $(ALL_GO_MOD_DIRS); do \
echo ">>> Running 'go vet' for module: $${dir}"; \
(cd "$${dir}" && go vet ./...); \
done;
.PHONY: vet
lint: ## Lint (using "golangci-lint")
golangci-lint run
.PHONY: lint
fmt: ## Format all Go files
gofmt -l -w -s .
.PHONY: fmt

View File

@ -10,10 +10,10 @@
# Official Sentry SDK for Go
[![Build Status](https://github.com/getsentry/sentry-go/workflows/go-workflow/badge.svg)](https://github.com/getsentry/sentry-go/actions?query=workflow%3Ago-workflow)
[![Build Status](https://github.com/getsentry/sentry-go/actions/workflows/test.yml/badge.svg)](https://github.com/getsentry/sentry-go/actions/workflows/test.yml)
[![Go Report Card](https://goreportcard.com/badge/github.com/getsentry/sentry-go)](https://goreportcard.com/report/github.com/getsentry/sentry-go)
[![Discord](https://img.shields.io/discord/621778831602221064)](https://discord.gg/Ww9hbqr)
[![GoDoc](https://godoc.org/github.com/getsentry/sentry-go?status.svg)](https://godoc.org/github.com/getsentry/sentry-go)
[![X Follow](https://img.shields.io/twitter/follow/sentry?label=sentry&style=social)](https://x.com/intent/follow?screen_name=sentry)
[![go.dev](https://img.shields.io/badge/go.dev-pkg-007d9c.svg?style=flat)](https://pkg.go.dev/github.com/getsentry/sentry-go)
`sentry-go` provides a Sentry client implementation for the Go programming
@ -42,7 +42,7 @@ though support for this configuration is best-effort.
$ go get github.com/getsentry/sentry-go@latest
```
Check out the [list of released versions](https://pkg.go.dev/github.com/getsentry/sentry-go?tab=versions).
Check out the [list of released versions](https://github.com/getsentry/sentry-go/releases).
## Configuration
@ -62,30 +62,32 @@ More on this in the [Configuration section of the official Sentry Go SDK documen
The SDK supports reporting errors and tracking application performance.
To get started, have a look at one of our [examples](example/):
- [Basic error instrumentation](example/basic/main.go)
- [Error and tracing for HTTP servers](example/http/main.go)
To get started, have a look at one of our [examples](_examples/):
- [Basic error instrumentation](_examples/basic/main.go)
- [Error and tracing for HTTP servers](_examples/http/main.go)
We also provide a [complete API reference](https://pkg.go.dev/github.com/getsentry/sentry-go).
For more detailed information about how to get the most out of `sentry-go`,
checkout the official documentation:
check out the official documentation:
- [Sentry Go SDK documentation](https://docs.sentry.io/platforms/go/)
- Guides:
- [net/http](https://docs.sentry.io/platforms/go/guides/http/)
- [echo](https://docs.sentry.io/platforms/go/guides/echo/)
- [fasthttp](https://docs.sentry.io/platforms/go/guides/fasthttp/)
- [fiber](https://docs.sentry.io/platforms/go/guides/fiber/)
- [gin](https://docs.sentry.io/platforms/go/guides/gin/)
- [iris](https://docs.sentry.io/platforms/go/guides/iris/)
- [martini](https://docs.sentry.io/platforms/go/guides/martini/)
- [logrus](https://docs.sentry.io/platforms/go/guides/logrus/)
- [negroni](https://docs.sentry.io/platforms/go/guides/negroni/)
- [slog](https://docs.sentry.io/platforms/go/guides/slog/)
- [zerolog](https://docs.sentry.io/platforms/go/guides/zerolog/)
## Resources
- [Bug Tracker](https://github.com/getsentry/sentry-go/issues)
- [GitHub Project](https://github.com/getsentry/sentry-go)
- [![GoDoc](https://godoc.org/github.com/getsentry/sentry-go?status.svg)](https://godoc.org/github.com/getsentry/sentry-go)
- [![go.dev](https://img.shields.io/badge/go.dev-pkg-007d9c.svg?style=flat)](https://pkg.go.dev/github.com/getsentry/sentry-go)
- [![Documentation](https://img.shields.io/badge/documentation-sentry.io-green.svg)](https://docs.sentry.io/platforms/go/)
- [![Discussions](https://img.shields.io/github/discussions/getsentry/sentry-go.svg)](https://github.com/getsentry/sentry-go/discussions)
@ -96,7 +98,7 @@ checkout the official documentation:
## License
Licensed under
[The 2-Clause BSD License](https://opensource.org/licenses/BSD-2-Clause), see
[The MIT License](https://opensource.org/licenses/mit/), see
[`LICENSE`](LICENSE).
## Community

View File

@ -0,0 +1,36 @@
package attribute
// Builder is a key-value pair used to construct a typed attribute.
type Builder struct {
	Key   string // attribute name; must be non-empty for the Builder to be Valid
	Value Value  // typed attribute value; an INVALID type makes the Builder not Valid
}
// String returns a Builder for a string value.
func String(key, value string) Builder {
	return Builder{Key: key, Value: StringValue(value)}
}

// Int64 returns a Builder for an int64.
func Int64(key string, value int64) Builder {
	return Builder{Key: key, Value: Int64Value(value)}
}

// Int returns a Builder for an int64.
func Int(key string, value int) Builder {
	return Builder{Key: key, Value: IntValue(value)}
}

// Float64 returns a Builder for a float64.
func Float64(key string, v float64) Builder {
	return Builder{Key: key, Value: Float64Value(v)}
}

// Bool returns a Builder for a boolean.
func Bool(key string, v bool) Builder {
	return Builder{Key: key, Value: BoolValue(v)}
}
// Valid reports whether the Builder carries a non-empty key and a value
// whose type is known (i.e. not the INVALID zero value).
func (b *Builder) Valid() bool {
	return b.Key != "" && b.Value.Type() != INVALID
}

View File

@ -0,0 +1,49 @@
// Copied from https://github.com/open-telemetry/opentelemetry-go/blob/cc43e01c27892252aac9a8f20da28cdde957a289/attribute/rawhelpers.go
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute
import (
"math"
)
// boolToRaw encodes a bool into its raw uint64 representation:
// 1 for true, 0 for false.
func boolToRaw(b bool) uint64 { // b is not a control flag.
	var raw uint64
	if b {
		raw = 1
	}
	return raw
}

// rawToBool decodes a raw uint64 back into a bool; any non-zero
// value decodes as true.
func rawToBool(raw uint64) bool {
	return raw != 0
}
// int64ToRaw reinterprets the two's-complement bits of an int64 as a uint64.
func int64ToRaw(v int64) uint64 {
	// Assumes original was a valid int64 (overflow not checked).
	return uint64(v) // nolint: gosec
}

// rawToInt64 is the inverse of int64ToRaw: it reinterprets the raw uint64
// bits as a signed int64.
func rawToInt64(raw uint64) int64 {
	// Assumes original was a valid int64 (overflow not checked).
	return int64(raw) // nolint: gosec
}
// float64ToRaw reinterprets a float64 as its IEEE 754 bit pattern.
func float64ToRaw(v float64) uint64 {
	return math.Float64bits(v)
}

// rawToFloat64 is the inverse of float64ToRaw: it reinterprets an IEEE 754
// bit pattern as a float64.
func rawToFloat64(raw uint64) float64 {
	return math.Float64frombits(raw)
}

View File

@ -0,0 +1,207 @@
// Adapted from https://github.com/open-telemetry/opentelemetry-go/blob/cc43e01c27892252aac9a8f20da28cdde957a289/attribute/value.go
//
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package attribute
import (
"encoding/json"
"fmt"
"strconv"
)
// Type describes the type of the data Value holds.
// The zero value is INVALID.
type Type int // redefines builtin Type.

// Value represents the value part in key-value pairs.
type Value struct {
	vtype    Type   // discriminates which of the fields below holds the data
	numeric  uint64 // raw bits for BOOL, INT64, FLOAT64, and UINT64 values
	stringly string // payload for STRING values
}

const (
	// INVALID is used for a Value with no value set.
	INVALID Type = iota
	// BOOL is a boolean Type Value.
	BOOL
	// INT64 is a 64-bit signed integral Type Value.
	INT64
	// FLOAT64 is a 64-bit floating point Type Value.
	FLOAT64
	// STRING is a string Type Value.
	STRING
	// UINT64 is a 64-bit unsigned integral Type Value.
	//
	// This type is intentionally not exposed through the Builder API.
	UINT64
)
// BoolValue creates a BOOL Value.
func BoolValue(v bool) Value {
	return Value{vtype: BOOL, numeric: boolToRaw(v)}
}

// IntValue creates an INT64 Value.
func IntValue(v int) Value {
	return Int64Value(int64(v))
}

// Int64Value creates an INT64 Value.
func Int64Value(v int64) Value {
	return Value{vtype: INT64, numeric: int64ToRaw(v)}
}

// Float64Value creates a FLOAT64 Value.
func Float64Value(v float64) Value {
	return Value{vtype: FLOAT64, numeric: float64ToRaw(v)}
}

// StringValue creates a STRING Value.
func StringValue(v string) Value {
	return Value{vtype: STRING, stringly: v}
}

// Uint64Value creates a UINT64 Value.
//
// This constructor is intentionally not exposed through the Builder API.
func Uint64Value(v uint64) Value {
	return Value{vtype: UINT64, numeric: v}
}
// Type returns a type of the Value.
func (v Value) Type() Type {
return v.vtype
}
// AsBool returns the bool value. Make sure that the Value's type is
// BOOL; for any other type the raw bits are decoded anyway and the
// result is meaningless.
func (v Value) AsBool() bool {
	return rawToBool(v.numeric)
}
// AsInt64 returns the int64 value. Make sure that the Value's type is
// INT64; for any other type the raw bits are decoded anyway and the
// result is meaningless.
func (v Value) AsInt64() int64 {
	return rawToInt64(v.numeric)
}
// AsFloat64 returns the float64 value. Make sure that the Value's
// type is FLOAT64; for any other type the raw bits are decoded anyway
// and the result is meaningless.
func (v Value) AsFloat64() float64 {
	return rawToFloat64(v.numeric)
}
// AsString returns the string value. Make sure that the Value's type
// is STRING; for any other type it returns the empty string.
func (v Value) AsString() string {
	return v.stringly
}
// AsUint64 returns the uint64 value. Make sure that the Value's type is
// UINT64; for any other type the raw numeric payload is returned as-is.
func (v Value) AsUint64() uint64 {
	return v.numeric
}
type unknownValueType struct{}
// AsInterface returns Value's data as an interface{}, choosing the Go type
// that matches the Value's Type. Unknown types yield unknownValueType{}.
func (v Value) AsInterface() interface{} {
	switch v.vtype {
	case BOOL:
		return rawToBool(v.numeric)
	case INT64:
		return rawToInt64(v.numeric)
	case FLOAT64:
		return rawToFloat64(v.numeric)
	case STRING:
		return v.stringly
	case UINT64:
		return v.numeric
	default:
		return unknownValueType{}
	}
}
// String returns a string representation of Value's data, or "unknown"
// for types that have no representation (including INVALID).
func (v Value) String() string {
	switch v.vtype {
	case BOOL:
		return strconv.FormatBool(rawToBool(v.numeric))
	case INT64:
		return strconv.FormatInt(rawToInt64(v.numeric), 10)
	case FLOAT64:
		return fmt.Sprint(rawToFloat64(v.numeric))
	case STRING:
		return v.stringly
	case UINT64:
		return strconv.FormatUint(v.numeric, 10)
	default:
		return "unknown"
	}
}
// MarshalJSON returns the JSON encoding of the Value as an object with a
// "value" field holding the data and a "type" field holding the wire-format
// type name from mapTypesToStr.
func (v Value) MarshalJSON() ([]byte, error) {
	return json.Marshal(struct {
		Value any    `json:"value"`
		Type  string `json:"type"`
	}{
		Value: v.AsInterface(),
		Type:  mapTypesToStr[v.Type()],
	})
}
// String returns the lowercase Go name of the type, or "invalid" for
// INVALID and any unrecognized Type value.
func (t Type) String() string {
	switch t {
	case BOOL:
		return "bool"
	case INT64:
		return "int64"
	case FLOAT64:
		return "float64"
	case STRING:
		return "string"
	case UINT64:
		return "uint64"
	default:
		return "invalid"
	}
}
// mapTypesToStr is a map from attribute.Type to the primitive types the server understands.
// https://develop.sentry.dev/sdk/foundations/data-model/attributes/#primitive-types
//
// INVALID intentionally maps to the empty string: it has no wire
// representation.
var mapTypesToStr = map[Type]string{
	INVALID: "",
	BOOL:    "boolean",
	INT64:   "integer",
	FLOAT64: "double",
	STRING:  "string",
	UINT64:  "integer", // wire format: same "integer" type
}

View File

@ -0,0 +1,136 @@
package sentry
import (
"context"
"sync"
"time"
)
const (
	// batchSize is the maximum number of items delivered in one call to
	// sendBatch; it is also the capacity of the intake buffer.
	batchSize = 100
	// defaultBatchTimeout is how long the processor waits after the first
	// item of a batch arrives before flushing a partially filled batch.
	defaultBatchTimeout = 5 * time.Second
)

// batchProcessor collects items of type T and hands them to sendBatch in
// groups: a batch is flushed once batchSize items are pending, or
// batchTimeout after the first pending item arrived, whichever comes first.
// Send never blocks; it reports a drop when the intake buffer is full.
type batchProcessor[T any] struct {
	sendBatch    func([]T)          // invoked with each completed batch (from the run goroutine)
	itemCh       chan T             // buffered intake channel, capacity batchSize
	flushCh      chan chan struct{} // carries a per-flush completion signal
	cancel       context.CancelFunc // stops the run goroutine; set by Start
	wg           sync.WaitGroup     // tracks the run goroutine for Shutdown
	startOnce    sync.Once          // ensures Start runs at most once
	shutdownOnce sync.Once          // ensures Shutdown runs at most once
	batchTimeout time.Duration      // flush deadline for partial batches
}

// newBatchProcessor returns an unstarted processor that delivers batches to
// sendBatch. Call Start before relying on timed flushes.
func newBatchProcessor[T any](sendBatch func([]T)) *batchProcessor[T] {
	return &batchProcessor[T]{
		itemCh:       make(chan T, batchSize),
		flushCh:      make(chan chan struct{}),
		sendBatch:    sendBatch,
		batchTimeout: defaultBatchTimeout,
	}
}

// WithBatchTimeout sets a custom batch timeout for the processor.
// This is useful for testing or when different timing behavior is needed.
// It must be called before Start: the run goroutine reads batchTimeout
// without synchronization.
func (p *batchProcessor[T]) WithBatchTimeout(timeout time.Duration) *batchProcessor[T] {
	p.batchTimeout = timeout
	return p
}

// Send enqueues item without blocking. It returns false when the intake
// buffer is full and the item was dropped.
func (p *batchProcessor[T]) Send(item T) bool {
	select {
	case p.itemCh <- item:
		return true
	default:
		return false
	}
}

// Start launches the background batching goroutine. Only the first call has
// an effect; subsequent calls are no-ops.
func (p *batchProcessor[T]) Start() {
	p.startOnce.Do(func() {
		ctx, cancel := context.WithCancel(context.Background())
		p.cancel = cancel
		p.wg.Add(1)
		go p.run(ctx)
	})
}

// Flush asks the run goroutine to drain and deliver everything currently
// buffered, then waits until that completes or timeout is closed. If the
// processor was never started, Flush blocks until timeout.
func (p *batchProcessor[T]) Flush(timeout <-chan struct{}) {
	done := make(chan struct{})
	select {
	case p.flushCh <- done:
		select {
		case <-done:
		case <-timeout:
		}
	case <-timeout:
	}
}

// Shutdown stops the run goroutine and waits for it to exit; pending items
// are drained and delivered first. Safe to call multiple times, and a no-op
// if Start was never called.
func (p *batchProcessor[T]) Shutdown() {
	p.shutdownOnce.Do(func() {
		if p.cancel != nil {
			p.cancel()
			p.wg.Wait()
		}
	})
}

// run is the background loop. It owns the pending batch and the flush timer.
func (p *batchProcessor[T]) run(ctx context.Context) {
	defer p.wg.Done()

	var items []T

	// Create the timer in a stopped, drained state. The previous
	// NewTimer(0)+Stop pattern races with the zero-duration timer firing:
	// on Go runtimes before 1.23 a stale tick could remain buffered in
	// timer.C, making the first partial batch flush immediately instead of
	// after batchTimeout.
	timer := time.NewTimer(p.batchTimeout)
	if !timer.Stop() {
		<-timer.C
	}
	defer timer.Stop()

	// stopTimer disarms the timer and discards an already-delivered tick so
	// a subsequent Reset starts clean (required by pre-1.23 timer semantics).
	stopTimer := func() {
		if !timer.Stop() {
			select {
			case <-timer.C:
			default:
			}
		}
	}

	// flush delivers the pending items, if any, and resets the batch.
	flush := func() {
		if len(items) > 0 {
			p.sendBatch(items)
			items = nil
		}
	}

	// drain moves everything currently buffered in itemCh into items
	// without blocking.
	drain := func() {
		for {
			select {
			case item := <-p.itemCh:
				items = append(items, item)
			default:
				return
			}
		}
	}

	for {
		select {
		case item := <-p.itemCh:
			if len(items) == 0 {
				// First item of a new batch: arm the flush deadline.
				timer.Reset(p.batchTimeout)
			}
			items = append(items, item)
			if len(items) >= batchSize {
				flush()
				stopTimer() // the deadline no longer applies to an empty batch
			}
		case <-timer.C:
			// Deadline reached: deliver the partial batch. The timer has
			// fired, so no drain of timer.C is needed before the next Reset.
			flush()
		case done := <-p.flushCh:
			// Explicit flush: pick up everything accepted before Flush was
			// called, deliver it, then signal completion.
			drain()
			flush()
			stopTimer()
			close(done)
		case <-ctx.Done():
			// Shutdown: deliver whatever is left, then exit.
			drain()
			flush()
			return
		}
	}
}

121
vendor/github.com/getsentry/sentry-go/check_in.go generated vendored Normal file
View File

@ -0,0 +1,121 @@
package sentry
import "time"
// CheckInStatus is the lifecycle state reported for a monitor check-in.
type CheckInStatus string

const (
	// CheckInStatusInProgress reports that the monitored job has started.
	CheckInStatusInProgress CheckInStatus = "in_progress"
	// CheckInStatusOK reports that the monitored job finished successfully.
	CheckInStatusOK CheckInStatus = "ok"
	// CheckInStatusError reports that the monitored job failed.
	CheckInStatusError CheckInStatus = "error"
)
// checkInScheduleType distinguishes the two supported monitor schedule
// encodings on the wire.
type checkInScheduleType string

const (
	checkInScheduleTypeCrontab  checkInScheduleType = "crontab"  // cron-expression schedule
	checkInScheduleTypeInterval checkInScheduleType = "interval" // fixed-interval schedule
)
// MonitorSchedule describes when a monitored job is expected to run.
// Construct values with CrontabSchedule or IntervalSchedule.
type MonitorSchedule interface {
	// scheduleType is a private method that must be implemented for monitor schedule
	// implementation. It should never be called. This method is made for having
	// specific private implementation of MonitorSchedule interface.
	scheduleType() checkInScheduleType
}
// crontabSchedule is the wire representation of a cron-expression schedule.
type crontabSchedule struct {
	Type  string `json:"type"`  // always "crontab"
	Value string `json:"value"` // the cron expression, e.g. "8 * * * *"
}

// scheduleType marks crontabSchedule as a MonitorSchedule implementation.
func (c crontabSchedule) scheduleType() checkInScheduleType {
	return checkInScheduleTypeCrontab
}
// CrontabSchedule defines the MonitorSchedule with a cron format.
// Example: "8 * * * *".
func CrontabSchedule(scheduleString string) MonitorSchedule {
	schedule := crontabSchedule{
		Value: scheduleString,
		Type:  string(checkInScheduleTypeCrontab),
	}
	return schedule
}
// intervalSchedule is the wire representation of a fixed-interval schedule.
type intervalSchedule struct {
	Type  string `json:"type"`  // always "interval"
	Value int64  `json:"value"` // number of units between expected runs
	Unit  string `json:"unit"`  // one of the MonitorScheduleUnit values
}

// scheduleType marks intervalSchedule as a MonitorSchedule implementation.
func (i intervalSchedule) scheduleType() checkInScheduleType {
	return checkInScheduleTypeInterval
}
// MonitorScheduleUnit is the time unit used by IntervalSchedule.
type MonitorScheduleUnit string

const (
	MonitorScheduleUnitMinute MonitorScheduleUnit = "minute"
	MonitorScheduleUnitHour   MonitorScheduleUnit = "hour"
	MonitorScheduleUnitDay    MonitorScheduleUnit = "day"
	MonitorScheduleUnitWeek   MonitorScheduleUnit = "week"
	MonitorScheduleUnitMonth  MonitorScheduleUnit = "month"
	MonitorScheduleUnitYear   MonitorScheduleUnit = "year"
)
// IntervalSchedule defines the MonitorSchedule with an interval format.
//
// Example:
//
//	IntervalSchedule(1, sentry.MonitorScheduleUnitDay)
func IntervalSchedule(value int64, unit MonitorScheduleUnit) MonitorSchedule {
	schedule := intervalSchedule{
		Value: value,
		Unit:  string(unit),
		Type:  string(checkInScheduleTypeInterval),
	}
	return schedule
}
// MonitorConfig describes the expected schedule and failure thresholds of a
// monitored (cron) job. It is attached to check-in events to create or
// update the monitor server-side.
type MonitorConfig struct { //nolint: maligned // prefer readability over optimal memory layout
	// Schedule is when the job is expected to run (crontab or interval form).
	Schedule MonitorSchedule `json:"schedule,omitempty"`
	// The allowed margin of minutes after the expected check-in time that
	// the monitor will not be considered missed for.
	CheckInMargin int64 `json:"checkin_margin,omitempty"`
	// The allowed duration in minutes that the monitor may be `in_progress`
	// for before being considered failed due to timeout.
	MaxRuntime int64 `json:"max_runtime,omitempty"`
	// A tz database string representing the timezone which the monitor's execution schedule is in.
	// See: https://en.wikipedia.org/wiki/List_of_tz_database_time_zones
	Timezone string `json:"timezone,omitempty"`
	// The number of consecutive failed check-ins it takes before an issue is created.
	FailureIssueThreshold int64 `json:"failure_issue_threshold,omitempty"`
	// The number of consecutive OK check-ins it takes before an issue is resolved.
	RecoveryThreshold int64 `json:"recovery_threshold,omitempty"`
}
// CheckIn is a single status report for a monitored job, as passed to
// CaptureCheckIn.
type CheckIn struct { //nolint: maligned // prefer readability over optimal memory layout
	// Check-In ID (unique and client generated)
	ID EventID `json:"check_in_id"`
	// The distinct slug of the monitor.
	MonitorSlug string `json:"monitor_slug"`
	// The status of the check-in.
	Status CheckInStatus `json:"status"`
	// The duration of the check-in. Will only take effect if the status is ok or error.
	Duration time.Duration `json:"duration,omitempty"`
}
// serializedCheckIn is used by checkInMarshalJSON method on Event struct.
// It mirrors CheckIn but uses the wire types (string ID, float seconds) and
// carries the release/environment/monitor-config context.
// See https://develop.sentry.dev/sdk/check-ins/
type serializedCheckIn struct { //nolint: maligned
	// Check-In ID (unique and client generated).
	CheckInID string `json:"check_in_id"`
	// The distinct slug of the monitor.
	MonitorSlug string `json:"monitor_slug"`
	// The status of the check-in.
	Status CheckInStatus `json:"status"`
	// The duration of the check-in in seconds. Will only take effect if the status is ok or error.
	Duration    float64        `json:"duration,omitempty"`
	Release     string         `json:"release,omitempty"`
	Environment string         `json:"environment,omitempty"`
	// MonitorConfig optionally creates/updates the monitor definition upstream.
	MonitorConfig *MonitorConfig `json:"monitor_config,omitempty"`
}

View File

@ -5,32 +5,44 @@ import (
"crypto/x509"
"fmt"
"io"
"log"
"math/rand"
"net/http"
"os"
"reflect"
"sort"
"strings"
"sync"
"time"
"github.com/getsentry/sentry-go/internal/debug"
"github.com/getsentry/sentry-go/internal/debuglog"
httpInternal "github.com/getsentry/sentry-go/internal/http"
"github.com/getsentry/sentry-go/internal/protocol"
"github.com/getsentry/sentry-go/internal/ratelimit"
"github.com/getsentry/sentry-go/internal/telemetry"
)
// maxErrorDepth is the maximum number of errors reported in a chain of errors.
// This protects the SDK from an arbitrarily long chain of wrapped errors.
//
// An additional consideration is that arguably reporting a long chain of errors
// is of little use when debugging production errors with Sentry. The Sentry UI
// is not optimized for long chains either. The top-level error together with a
// stack trace is often the most useful information.
const maxErrorDepth = 10
// The identifier of the SDK.
const sdkIdentifier = "sentry.go"
// defaultMaxSpans limits the default number of recorded spans per transaction. The limit is
// meant to bound memory usage and prevent too large transaction events that
// would be rejected by Sentry.
const defaultMaxSpans = 1000
const (
// maxErrorDepth is the maximum number of errors reported in a chain of errors.
// This protects the SDK from an arbitrarily long chain of wrapped errors.
//
// An additional consideration is that arguably reporting a long chain of errors
// is of little use when debugging production errors with Sentry. The Sentry UI
// is not optimized for long chains either. The top-level error together with a
// stack trace is often the most useful information.
maxErrorDepth = 100
// defaultMaxSpans limits the default number of recorded spans per transaction. The limit is
// meant to bound memory usage and prevent too large transaction events that
// would be rejected by Sentry.
defaultMaxSpans = 1000
// defaultMaxBreadcrumbs is the default maximum number of breadcrumbs added to
// an event. Can be overwritten with the MaxBreadcrumbs option.
defaultMaxBreadcrumbs = 100
)
// hostname is the host name reported by the kernel. It is precomputed once to
// avoid syscalls when capturing events.
@ -75,9 +87,9 @@ type usageError struct {
error
}
// Logger is an instance of log.Logger that is use to provide debug information about running Sentry Client
// can be enabled by either using Logger.SetOutput directly or with Debug client option.
var Logger = log.New(io.Discard, "[Sentry] ", log.LstdFlags)
// DebugLogger is an instance of log.Logger that is used to provide debug information about running Sentry Client
// can be enabled by either using debuglog.SetOutput directly or with Debug client option.
var DebugLogger = debuglog.GetLogger()
// EventProcessor is a function that processes an event.
// Event processors are used to change an event before it is sent to Sentry.
@ -88,7 +100,7 @@ type EventProcessor func(event *Event, hint *EventHint) *Event
// ApplyToEvent changes an event based on external data and/or
// an event hint.
type EventModifier interface {
ApplyToEvent(event *Event, hint *EventHint) *Event
ApplyToEvent(event *Event, hint *EventHint, client *Client) *Event
}
var globalEventProcessors []EventProcessor
@ -131,19 +143,35 @@ type ClientOptions struct {
TracesSampleRate float64
// Used to customize the sampling of traces, overrides TracesSampleRate.
TracesSampler TracesSampler
// Control with URLs trace propagation should be enabled. Does not support regex patterns.
TracePropagationTargets []string
// PropagateTraceparent is used to control whether the W3C Trace Context HTTP traceparent header
// is propagated on outgoing http requests.
PropagateTraceparent bool
// List of regexp strings that will be used to match against event's message
// and if applicable, caught errors type and value.
// If the match is found, then a whole event will be dropped.
IgnoreErrors []string
// List of regexp strings that will be used to match against a transaction's
// name. If a match is found, then the transaction will be dropped.
IgnoreTransactions []string
// If this flag is enabled, certain personally identifiable information (PII) is added by active integrations.
// By default, no such data is sent.
SendDefaultPII bool
// BeforeSend is called before error events are sent to Sentry.
// Use it to mutate the event or return nil to discard the event.
// See EventProcessor if you need to mutate transactions.
// You can use it to mutate the event or return nil to discard it.
BeforeSend func(event *Event, hint *EventHint) *Event
// BeforeSendLog is called before log events are sent to Sentry.
// You can use it to mutate the log event or return nil to discard it.
BeforeSendLog func(event *Log) *Log
// BeforeSendTransaction is called before transaction events are sent to Sentry.
// Use it to mutate the transaction or return nil to discard the transaction.
BeforeSendTransaction func(event *Event, hint *EventHint) *Event
// Before breadcrumb add callback.
BeforeBreadcrumb func(breadcrumb *Breadcrumb, hint *BreadcrumbHint) *Breadcrumb
// BeforeSendMetric is called before metric events are sent to Sentry.
// You can use it to mutate the metric or return nil to discard it.
BeforeSendMetric func(metric *Metric) *Metric
// Integrations to be installed on the current Client, receives default
// integrations.
Integrations func([]Integration) []Integration
@ -214,18 +242,50 @@ type ClientOptions struct {
// is not optimized for long chains either. The top-level error together with a
// stack trace is often the most useful information.
MaxErrorDepth int
// Default event tags. These are overridden by tags set on a scope.
Tags map[string]string
// EnableLogs controls when logs should be emitted.
EnableLogs bool
// DisableMetrics controls when metrics should be emitted.
DisableMetrics bool
// TraceIgnoreStatusCodes is a list of HTTP status codes that should not be traced.
// Each element can be either:
// - A single-element slice [code] for a specific status code
// - A two-element slice [min, max] for a range of status codes (inclusive)
// When an HTTP request results in a status code that matches any of these codes or ranges,
// the transaction will not be sent to Sentry.
//
// Examples:
// [][]int{{404}} // ignore only status code 404
// [][]int{{400, 405}} // ignore status codes 400-405
// [][]int{{404}, {500}} // ignore status codes 404 and 500
// [][]int{{404}, {400, 405}, {500, 599}} // ignore 404, range 400-405, and range 500-599
//
// By default, this ignores 404 status codes.
//
// IMPORTANT: to not ignore any status codes, the option should be an empty slice and not nil. The nil option is
// used for defaulting to 404 ignores.
TraceIgnoreStatusCodes [][]int
// DisableTelemetryBuffer disables the telemetry buffer layer for prioritizing events and uses the old transport layer.
DisableTelemetryBuffer bool
}
// Client is the underlying processor that is used by the main API and Hub
// instances. It must be created with NewClient.
type Client struct {
mu sync.RWMutex
options ClientOptions
dsn *Dsn
eventProcessors []EventProcessor
integrations []Integration
sdkIdentifier string
sdkVersion string
// Transport is read-only. Replacing the transport of an existing client is
// not supported, create a new client instead.
Transport Transport
Transport Transport
batchLogger *logBatchProcessor
batchMeter *metricBatchProcessor
telemetryProcessor *telemetry.Processor
}
// NewClient creates and returns an instance of Client configured using
@ -236,12 +296,34 @@ type Client struct {
// single goroutine) or hub methods (for concurrent programs, for example web
// servers).
func NewClient(options ClientOptions) (*Client, error) {
// The default error event sample rate for all SDKs is 1.0 (send all).
//
// In Go, the zero value (default) for float64 is 0.0, which means that
// constructing a client with NewClient(ClientOptions{}), or, equivalently,
// initializing the SDK with Init(ClientOptions{}) without an explicit
// SampleRate would drop all events.
//
// To retain the desired default behavior, we exceptionally flip SampleRate
// from 0.0 to 1.0 here. Setting the sample rate to 0.0 is not very useful
// anyway, and the same end result can be achieved in many other ways like
// not initializing the SDK, setting the DSN to the empty string or using an
// event processor that always returns nil.
//
// An alternative API could be such that default options don't need to be
// the same as Go's zero values, for example using the Functional Options
// pattern. That would either require a breaking change if we want to reuse
// the obvious NewClient name, or a new function as an alternative
// constructor.
if options.SampleRate == 0.0 {
options.SampleRate = 1.0
}
if options.Debug {
debugWriter := options.DebugWriter
if debugWriter == nil {
debugWriter = os.Stderr
}
Logger.SetOutput(debugWriter)
debuglog.SetOutput(debugWriter)
}
if options.Dsn == "" {
@ -264,6 +346,10 @@ func NewClient(options ClientOptions) (*Client, error) {
options.MaxSpans = defaultMaxSpans
}
if options.TraceIgnoreStatusCodes == nil {
options.TraceIgnoreStatusCodes = [][]int{{404}}
}
// SENTRYGODEBUG is a comma-separated list of key=value pairs (similar
// to GODEBUG). It is not a supported feature: recognized debug options
// may change any time.
@ -297,11 +383,28 @@ func NewClient(options ClientOptions) (*Client, error) {
}
client := Client{
options: options,
dsn: dsn,
options: options,
dsn: dsn,
sdkIdentifier: sdkIdentifier,
sdkVersion: SDKVersion,
}
client.setupTransport()
// noop Telemetry Buffers and Processor for now
// if !options.DisableTelemetryBuffer {
// client.setupTelemetryProcessor()
// } else
if options.EnableLogs {
client.batchLogger = newLogBatchProcessor(&client)
client.batchLogger.Start()
}
if !options.DisableMetrics {
client.batchMeter = newMetricBatchProcessor(&client)
client.batchMeter.Start()
}
client.setupIntegrations()
return &client, nil
@ -315,15 +418,7 @@ func (client *Client) setupTransport() {
if opts.Dsn == "" {
transport = new(noopTransport)
} else {
httpTransport := NewHTTPTransport()
// When tracing is enabled, use larger buffer to
// accommodate more concurrent events.
// TODO(tracing): consider using separate buffers per
// event type.
if opts.EnableTracing {
httpTransport.BufferSize = 1000
}
transport = httpTransport
transport = NewHTTPTransport()
}
}
@ -331,12 +426,65 @@ func (client *Client) setupTransport() {
client.Transport = transport
}
func (client *Client) setupTelemetryProcessor() { // nolint: unused
if client.options.DisableTelemetryBuffer {
return
}
if client.dsn == nil {
debuglog.Println("Telemetry buffer disabled: no DSN configured")
return
}
// We currently disallow using custom Transport with the new Telemetry Processor, due to the difference in transport signatures.
// The option should be enabled when the new Transport interface signature changes.
if client.options.Transport != nil {
debuglog.Println("Cannot enable Telemetry Processor/Buffers with custom Transport: fallback to old transport")
if client.options.EnableLogs {
client.batchLogger = newLogBatchProcessor(client)
client.batchLogger.Start()
}
if !client.options.DisableMetrics {
client.batchMeter = newMetricBatchProcessor(client)
client.batchMeter.Start()
}
return
}
transport := httpInternal.NewAsyncTransport(httpInternal.TransportOptions{
Dsn: client.options.Dsn,
HTTPClient: client.options.HTTPClient,
HTTPTransport: client.options.HTTPTransport,
HTTPProxy: client.options.HTTPProxy,
HTTPSProxy: client.options.HTTPSProxy,
CaCerts: client.options.CaCerts,
})
client.Transport = &internalAsyncTransportAdapter{transport: transport}
buffers := map[ratelimit.Category]telemetry.Buffer[protocol.TelemetryItem]{
ratelimit.CategoryError: telemetry.NewRingBuffer[protocol.TelemetryItem](ratelimit.CategoryError, 100, telemetry.OverflowPolicyDropOldest, 1, 0),
ratelimit.CategoryTransaction: telemetry.NewRingBuffer[protocol.TelemetryItem](ratelimit.CategoryTransaction, 1000, telemetry.OverflowPolicyDropOldest, 1, 0),
ratelimit.CategoryLog: telemetry.NewRingBuffer[protocol.TelemetryItem](ratelimit.CategoryLog, 10*100, telemetry.OverflowPolicyDropOldest, 100, 5*time.Second),
ratelimit.CategoryMonitor: telemetry.NewRingBuffer[protocol.TelemetryItem](ratelimit.CategoryMonitor, 100, telemetry.OverflowPolicyDropOldest, 1, 0),
ratelimit.CategoryTraceMetric: telemetry.NewRingBuffer[protocol.TelemetryItem](ratelimit.CategoryTraceMetric, 10*100, telemetry.OverflowPolicyDropOldest, 100, 5*time.Second),
}
sdkInfo := &protocol.SdkInfo{
Name: client.sdkIdentifier,
Version: client.sdkVersion,
}
client.telemetryProcessor = telemetry.NewProcessor(buffers, transport, &client.dsn.Dsn, sdkInfo)
}
func (client *Client) setupIntegrations() {
integrations := []Integration{
new(contextifyFramesIntegration),
new(environmentIntegration),
new(modulesIntegration),
new(ignoreErrorsIntegration),
new(ignoreTransactionsIntegration),
new(globalTagsIntegration),
}
if client.options.Integrations != nil {
@ -345,12 +493,12 @@ func (client *Client) setupIntegrations() {
for _, integration := range integrations {
if client.integrationAlreadyInstalled(integration.Name()) {
Logger.Printf("Integration %s is already installed\n", integration.Name())
debuglog.Printf("Integration %s is already installed\n", integration.Name())
continue
}
client.integrations = append(client.integrations, integration)
integration.SetupOnce(client)
Logger.Printf("Integration installed: %s\n", integration.Name())
debuglog.Printf("Integration installed: %s\n", integration.Name())
}
sort.Slice(client.integrations, func(i, j int) bool {
@ -370,31 +518,98 @@ func (client *Client) AddEventProcessor(processor EventProcessor) {
}
// Options return ClientOptions for the current Client.
func (client Client) Options() ClientOptions {
func (client *Client) Options() ClientOptions {
// Note: internally, consider using `client.options` instead of `client.Options()` to avoid copying the object each time.
return client.options
}
// CaptureMessage captures an arbitrary message.
func (client *Client) CaptureMessage(message string, hint *EventHint, scope EventModifier) *EventID {
event := client.eventFromMessage(message, LevelInfo)
event := client.EventFromMessage(message, LevelInfo)
return client.CaptureEvent(event, hint, scope)
}
// CaptureException captures an error.
func (client *Client) CaptureException(exception error, hint *EventHint, scope EventModifier) *EventID {
event := client.eventFromException(exception, LevelError)
event := client.EventFromException(exception, LevelError)
return client.CaptureEvent(event, hint, scope)
}
// CaptureCheckIn captures a check in.
func (client *Client) CaptureCheckIn(checkIn *CheckIn, monitorConfig *MonitorConfig, scope EventModifier) *EventID {
event := client.EventFromCheckIn(checkIn, monitorConfig)
if event != nil && event.CheckIn != nil {
client.CaptureEvent(event, nil, scope)
return &event.CheckIn.ID
}
return nil
}
// CaptureEvent captures an event on the currently active client if any.
//
// The event must already be assembled. Typically code would instead use
// The event must already be assembled. Typically, code would instead use
// the utility methods like CaptureException. The return value is the
// event ID. In case Sentry is disabled or event was dropped, the return value will be nil.
func (client *Client) CaptureEvent(event *Event, hint *EventHint, scope EventModifier) *EventID {
return client.processEvent(event, hint, scope)
}
// captureLog enqueues a structured log entry for delivery to Sentry. It
// applies the BeforeSendLog callback, then routes the log to the telemetry
// processor when one is configured, otherwise to the legacy batch logger.
// It returns false when the log was dropped at any stage. The Scope
// parameter is currently unused.
func (client *Client) captureLog(log *Log, _ *Scope) bool {
	if log == nil {
		return false
	}
	// Give user code a chance to mutate or veto the log first.
	if client.options.BeforeSendLog != nil {
		log = client.options.BeforeSendLog(log)
		if log == nil {
			debuglog.Println("Log dropped due to BeforeSendLog callback.")
			return false
		}
	}
	// Both delivery paths are non-blocking and report drops via their
	// return value; a full buffer means the log is lost.
	if client.telemetryProcessor != nil {
		if !client.telemetryProcessor.Add(log) {
			debuglog.Print("Dropping log: telemetry buffer full or category missing")
			return false
		}
	} else if client.batchLogger != nil {
		if !client.batchLogger.Send(log) {
			debuglog.Printf("Dropping log [%s]: buffer full", log.Level)
			return false
		}
	}
	return true
}
// captureMetric enqueues a metric for delivery to Sentry. It applies the
// BeforeSendMetric callback, then routes the metric to the telemetry
// processor when one is configured, otherwise to the batch meter. It
// returns false when the metric was dropped at any stage. The Scope
// parameter is currently unused.
func (client *Client) captureMetric(metric *Metric, _ *Scope) bool {
	if metric == nil {
		return false
	}
	// Give user code a chance to mutate or veto the metric first.
	if client.options.BeforeSendMetric != nil {
		metric = client.options.BeforeSendMetric(metric)
		if metric == nil {
			debuglog.Println("Metric dropped due to BeforeSendMetric callback.")
			return false
		}
	}
	// Both delivery paths are non-blocking and report drops via their
	// return value; a full buffer means the metric is lost.
	if client.telemetryProcessor != nil {
		// NOTE(review): Printf with no format verbs; Print would suffice.
		if !client.telemetryProcessor.Add(metric) {
			debuglog.Printf("Dropping metric: telemetry buffer full or category missing")
			return false
		}
	} else if client.batchMeter != nil {
		if !client.batchMeter.Send(metric) {
			debuglog.Printf("Dropping metric %q: buffer full", metric.Name)
			return false
		}
	}
	return true
}
// Recover captures a panic.
// Returns EventID if successfully, or nil if there's no error to recover from.
func (client *Client) Recover(err interface{}, hint *EventHint, scope EventModifier) *EventID {
@ -437,11 +652,11 @@ func (client *Client) RecoverWithContext(
var event *Event
switch err := err.(type) {
case error:
event = client.eventFromException(err, LevelFatal)
event = client.EventFromException(err, LevelFatal)
case string:
event = client.eventFromMessage(err, LevelFatal)
event = client.EventFromMessage(err, LevelFatal)
default:
event = client.eventFromMessage(fmt.Sprintf("%#v", err), LevelFatal)
event = client.EventFromMessage(fmt.Sprintf("%#v", err), LevelFatal)
}
return client.CaptureEvent(event, hint, scope)
}
@ -458,19 +673,67 @@ func (client *Client) RecoverWithContext(
// the network synchronously, configure it to use the HTTPSyncTransport in the
// call to Init.
func (client *Client) Flush(timeout time.Duration) bool {
if client.batchLogger != nil || client.batchMeter != nil || client.telemetryProcessor != nil {
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
return client.FlushWithContext(ctx)
}
return client.Transport.Flush(timeout)
}
func (client *Client) eventFromMessage(message string, level Level) *Event {
// FlushWithContext waits until the underlying Transport sends any buffered events
// to the Sentry server, blocking for at most the duration specified by the context.
// It returns false if the context is canceled before the events are sent. In such a case,
// some events may not be delivered.
//
// FlushWithContext should be called before terminating the program to ensure no
// events are unintentionally dropped.
//
// Avoid calling FlushWithContext indiscriminately after each call to CaptureEvent,
// CaptureException, or CaptureMessage. To send events synchronously over the network,
// configure the SDK to use HTTPSyncTransport during initialization with Init.
func (client *Client) FlushWithContext(ctx context.Context) bool {
if client.batchLogger != nil {
client.batchLogger.Flush(ctx.Done())
}
if client.batchMeter != nil {
client.batchMeter.Flush(ctx.Done())
}
if client.telemetryProcessor != nil {
return client.telemetryProcessor.FlushWithContext(ctx)
}
return client.Transport.FlushWithContext(ctx)
}
// Close cleans up underlying Transport resources.
//
// Close should be called after Flush and before terminating the program;
// otherwise some events may be lost.
func (client *Client) Close() {
	// Shut down the auxiliary pipelines first so they stop producing work
	// for the transport before it is closed.
	if client.telemetryProcessor != nil {
		client.telemetryProcessor.Close(5 * time.Second)
	}
	if client.batchLogger != nil {
		client.batchLogger.Shutdown()
	}
	if client.batchMeter != nil {
		client.batchMeter.Shutdown()
	}
	client.Transport.Close()
}
// EventFromMessage creates an event from the given message string.
func (client *Client) EventFromMessage(message string, level Level) *Event {
if message == "" {
err := usageError{fmt.Errorf("%s called with empty message", callerFunctionName())}
return client.eventFromException(err, level)
return client.EventFromException(err, level)
}
event := NewEvent()
event.Level = level
event.Message = message
if client.Options().AttachStacktrace {
if client.options.AttachStacktrace {
event.Threads = []Thread{{
Stacktrace: NewStacktrace(),
Crashed: false,
@ -481,51 +744,60 @@ func (client *Client) eventFromMessage(message string, level Level) *Event {
return event
}
func (client *Client) eventFromException(exception error, level Level) *Event {
// EventFromException creates a new Sentry event from the given `error` instance.
func (client *Client) EventFromException(exception error, level Level) *Event {
event := NewEvent()
event.Level = level
err := exception
if err == nil {
err = usageError{fmt.Errorf("%s called with nil error", callerFunctionName())}
}
event := NewEvent()
event.Level = level
for i := 0; i < client.options.MaxErrorDepth && err != nil; i++ {
event.Exception = append(event.Exception, Exception{
Value: err.Error(),
Type: reflect.TypeOf(err).String(),
Stacktrace: ExtractStacktrace(err),
})
switch previous := err.(type) {
case interface{ Unwrap() error }:
err = previous.Unwrap()
case interface{ Cause() error }:
err = previous.Cause()
default:
err = nil
}
}
// Add a trace of the current stack to the most recent error in a chain if
// it doesn't have a stack trace yet.
// We only add to the most recent error to avoid duplication and because the
// current stack is most likely unrelated to errors deeper in the chain.
if event.Exception[0].Stacktrace == nil {
event.Exception[0].Stacktrace = NewStacktrace()
}
// event.Exception should be sorted such that the most recent error is last.
reverse(event.Exception)
event.SetException(err, client.options.MaxErrorDepth)
return event
}
// reverse reverses the slice a in place.
func reverse(a []Exception) {
for i := len(a)/2 - 1; i >= 0; i-- {
opp := len(a) - 1 - i
a[i], a[opp] = a[opp], a[i]
// EventFromCheckIn creates a new Sentry event from the given `check_in` instance.
// It returns nil when checkIn is nil. A missing check-in ID is replaced with
// a freshly generated one, so the returned event always carries a usable ID.
func (client *Client) EventFromCheckIn(checkIn *CheckIn, monitorConfig *MonitorConfig) *Event {
	if checkIn == nil {
		return nil
	}
	event := NewEvent()
	event.Type = checkInType
	// Check-in IDs are client generated; fill one in if the caller did not.
	var checkInID EventID
	if checkIn.ID == "" {
		checkInID = EventID(uuid())
	} else {
		checkInID = checkIn.ID
	}
	// Copy the check-in so the event owns its own instance.
	event.CheckIn = &CheckIn{
		ID:          checkInID,
		MonitorSlug: checkIn.MonitorSlug,
		Status:      checkIn.Status,
		Duration:    checkIn.Duration,
	}
	event.MonitorConfig = monitorConfig
	return event
}
// SetSDKIdentifier overrides the SDK name reported with outgoing events.
// Guarded by the client mutex since it may race with GetSDKIdentifier.
func (client *Client) SetSDKIdentifier(identifier string) {
	client.mu.Lock()
	defer client.mu.Unlock()
	client.sdkIdentifier = identifier
}
// GetSDKIdentifier returns the SDK name reported with outgoing events.
// Guarded by the client mutex since it may race with SetSDKIdentifier.
func (client *Client) GetSDKIdentifier() string {
	client.mu.RLock()
	defer client.mu.RUnlock()
	return client.sdkIdentifier
}
func (client *Client) processEvent(event *Event, hint *EventHint, scope EventModifier) *EventID {
@ -534,35 +806,11 @@ func (client *Client) processEvent(event *Event, hint *EventHint, scope EventMod
return client.CaptureException(err, hint, scope)
}
options := client.Options()
// The default error event sample rate for all SDKs is 1.0 (send all).
//
// In Go, the zero value (default) for float64 is 0.0, which means that
// constructing a client with NewClient(ClientOptions{}), or, equivalently,
// initializing the SDK with Init(ClientOptions{}) without an explicit
// SampleRate would drop all events.
//
// To retain the desired default behavior, we exceptionally flip SampleRate
// from 0.0 to 1.0 here. Setting the sample rate to 0.0 is not very useful
// anyway, and the same end result can be achieved in many other ways like
// not initializing the SDK, setting the DSN to the empty string or using an
// event processor that always returns nil.
//
// An alternative API could be such that default options don't need to be
// the same as Go's zero values, for example using the Functional Options
// pattern. That would either require a breaking change if we want to reuse
// the obvious NewClient name, or a new function as an alternative
// constructor.
if options.SampleRate == 0.0 {
options.SampleRate = 1.0
}
// Transactions are sampled by options.TracesSampleRate or
// options.TracesSampler when they are started. All other events
// (errors, messages) are sampled here.
if event.Type != transactionType && !sample(options.SampleRate) {
Logger.Println("Event dropped due to SampleRate hit.")
// options.TracesSampler when they are started. Other events
// (errors, messages) are sampled here. Does not apply to check-ins.
if event.Type != transactionType && event.Type != checkInType && !sample(client.options.SampleRate) {
debuglog.Println("Event dropped due to SampleRate hit.")
return nil
}
@ -570,24 +818,42 @@ func (client *Client) processEvent(event *Event, hint *EventHint, scope EventMod
return nil
}
// As per spec, transactions do not go through BeforeSend.
if event.Type != transactionType && options.BeforeSend != nil {
if hint == nil {
hint = &EventHint{}
// Apply beforeSend* processors
if hint == nil {
hint = &EventHint{}
}
switch event.Type {
case transactionType:
if client.options.BeforeSendTransaction != nil {
if event = client.options.BeforeSendTransaction(event, hint); event == nil {
debuglog.Println("Transaction dropped due to BeforeSendTransaction callback.")
return nil
}
}
if event = options.BeforeSend(event, hint); event == nil {
Logger.Println("Event dropped due to BeforeSend callback.")
return nil
case checkInType: // not a default case, since we shouldn't apply BeforeSend on check-in events
default:
if client.options.BeforeSend != nil {
if event = client.options.BeforeSend(event, hint); event == nil {
debuglog.Println("Event dropped due to BeforeSend callback.")
return nil
}
}
}
client.Transport.SendEvent(event)
if client.telemetryProcessor != nil {
if !client.telemetryProcessor.Add(event) {
debuglog.Println("Event dropped: telemetry buffer full or unavailable")
}
} else {
client.Transport.SendEvent(event)
}
return &event.EventID
}
func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventModifier) *Event {
if event.EventID == "" {
// TODO set EventID when the event is created, same as in other SDKs. It's necessary for profileTransaction.ID.
event.EventID = EventID(uuid())
}
@ -600,7 +866,7 @@ func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventMod
}
if event.ServerName == "" {
event.ServerName = client.Options().ServerName
event.ServerName = client.options.ServerName
if event.ServerName == "" {
event.ServerName = hostname
@ -608,20 +874,20 @@ func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventMod
}
if event.Release == "" {
event.Release = client.Options().Release
event.Release = client.options.Release
}
if event.Dist == "" {
event.Dist = client.Options().Dist
event.Dist = client.options.Dist
}
if event.Environment == "" {
event.Environment = client.Options().Environment
event.Environment = client.options.Environment
}
event.Platform = "go"
event.Sdk = SdkInfo{
Name: SDKIdentifier,
Name: client.GetSDKIdentifier(),
Version: SDKVersion,
Integrations: client.listIntegrations(),
Packages: []SdkPackage{{
@ -631,7 +897,7 @@ func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventMod
}
if scope != nil {
event = scope.ApplyToEvent(event, hint)
event = scope.ApplyToEvent(event, hint, client)
if event == nil {
return nil
}
@ -641,7 +907,7 @@ func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventMod
id := event.EventID
event = processor(event, hint)
if event == nil {
Logger.Printf("Event dropped by one of the Client EventProcessors: %s\n", id)
debuglog.Printf("Event dropped by one of the Client EventProcessors: %s\n", id)
return nil
}
}
@ -650,7 +916,7 @@ func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventMod
id := event.EventID
event = processor(event, hint)
if event == nil {
Logger.Printf("Event dropped by one of the Global EventProcessors: %s\n", id)
debuglog.Printf("Event dropped by one of the Global EventProcessors: %s\n", id)
return nil
}
}
@ -658,7 +924,7 @@ func (client *Client) prepareEvent(event *Event, hint *EventHint, scope EventMod
return event
}
func (client Client) listIntegrations() []string {
func (client *Client) listIntegrations() []string {
integrations := make([]string, len(client.integrations))
for i, integration := range client.integrations {
integrations[i] = integration.Name()
@ -666,7 +932,7 @@ func (client Client) listIntegrations() []string {
return integrations
}
func (client Client) integrationAlreadyInstalled(name string) bool {
func (client *Client) integrationAlreadyInstalled(name string) bool {
for _, integration := range client.integrations {
if integration.Name() == name {
return true

View File

@ -1,64 +1,6 @@
/*
Package sentry is the official Sentry SDK for Go.
Package repository: https://github.com/getsentry/sentry-go/
Use it to report errors and track application performance through distributed
tracing.
For more information about Sentry and SDK features please have a look at the
documentation site https://docs.sentry.io/platforms/go/.
# Basic Usage
The first step is to initialize the SDK, providing at a minimum the DSN of your
Sentry project. This step is accomplished through a call to sentry.Init.
func main() {
err := sentry.Init(...)
...
}
A more detailed yet simple example is available at
https://github.com/getsentry/sentry-go/blob/master/example/basic/main.go.
# Error Reporting
The Capture* functions report messages and errors to Sentry.
sentry.CaptureMessage(...)
sentry.CaptureException(...)
sentry.CaptureEvent(...)
Use similarly named functions in the Hub for concurrent programs like web
servers.
# Performance Monitoring
You can use Sentry to monitor your application's performance. More information
on the product page https://docs.sentry.io/product/performance/.
The StartSpan function creates new spans.
span := sentry.StartSpan(ctx, "operation")
...
span.Finish()
# Integrations
The SDK has support for several Go frameworks, available as subpackages.
# Getting Support
For paid Sentry.io accounts, head out to https://sentry.io/support.
For all users, support channels include:
Forum: https://forum.sentry.io
Discord: https://discord.gg/Ww9hbqr (#go channel)
If you found an issue with the SDK, please report through
https://github.com/getsentry/sentry-go/issues/new/choose.
For responsibly disclosing a security issue, please follow the steps in
https://sentry.io/security/#vulnerability-disclosure.
For more information about Sentry and SDK features, please have a look at the official documentation site: https://docs.sentry.io/platforms/go/
*/
package sentry

View File

@ -1,204 +1,37 @@
package sentry
import (
"encoding/json"
"fmt"
"net/url"
"strconv"
"strings"
"time"
"github.com/getsentry/sentry-go/internal/protocol"
)
type scheme string
// Re-export protocol types to maintain public API compatibility
const (
schemeHTTP scheme = "http"
schemeHTTPS scheme = "https"
)
func (scheme scheme) defaultPort() int {
switch scheme {
case schemeHTTPS:
return 443
case schemeHTTP:
return 80
default:
return 80
}
// Dsn is used as the remote address source to client transport.
type Dsn struct {
protocol.Dsn
}
// DsnParseError represents an error that occurs if a Sentry
// DSN cannot be parsed.
type DsnParseError struct {
Message string
}
func (e DsnParseError) Error() string {
return "[Sentry] DsnParseError: " + e.Message
}
// Dsn is used as the remote address source to client transport.
type Dsn struct {
scheme scheme
publicKey string
secretKey string
host string
port int
path string
projectID string
}
type DsnParseError = protocol.DsnParseError
// NewDsn creates a Dsn by parsing rawURL. Most users will never call this
// function directly. It is provided for use in custom Transport
// implementations.
func NewDsn(rawURL string) (*Dsn, error) {
// Parse
parsedURL, err := url.Parse(rawURL)
protocolDsn, err := protocol.NewDsn(rawURL)
if err != nil {
return nil, &DsnParseError{fmt.Sprintf("invalid url: %v", err)}
return nil, err
}
// Scheme
var scheme scheme
switch parsedURL.Scheme {
case "http":
scheme = schemeHTTP
case "https":
scheme = schemeHTTPS
default:
return nil, &DsnParseError{"invalid scheme"}
}
// PublicKey
publicKey := parsedURL.User.Username()
if publicKey == "" {
return nil, &DsnParseError{"empty username"}
}
// SecretKey
var secretKey string
if parsedSecretKey, ok := parsedURL.User.Password(); ok {
secretKey = parsedSecretKey
}
// Host
host := parsedURL.Hostname()
if host == "" {
return nil, &DsnParseError{"empty host"}
}
// Port
var port int
if parsedURL.Port() != "" {
parsedPort, err := strconv.Atoi(parsedURL.Port())
if err != nil {
return nil, &DsnParseError{"invalid port"}
}
port = parsedPort
} else {
port = scheme.defaultPort()
}
// ProjectID
if parsedURL.Path == "" || parsedURL.Path == "/" {
return nil, &DsnParseError{"empty project id"}
}
pathSegments := strings.Split(parsedURL.Path[1:], "/")
projectID := pathSegments[len(pathSegments)-1]
if projectID == "" {
return nil, &DsnParseError{"empty project id"}
}
// Path
var path string
if len(pathSegments) > 1 {
path = "/" + strings.Join(pathSegments[0:len(pathSegments)-1], "/")
}
return &Dsn{
scheme: scheme,
publicKey: publicKey,
secretKey: secretKey,
host: host,
port: port,
path: path,
projectID: projectID,
}, nil
return &Dsn{Dsn: *protocolDsn}, nil
}
// String formats Dsn struct into a valid string url.
func (dsn Dsn) String() string {
var url string
url += fmt.Sprintf("%s://%s", dsn.scheme, dsn.publicKey)
if dsn.secretKey != "" {
url += fmt.Sprintf(":%s", dsn.secretKey)
}
url += fmt.Sprintf("@%s", dsn.host)
if dsn.port != dsn.scheme.defaultPort() {
url += fmt.Sprintf(":%d", dsn.port)
}
if dsn.path != "" {
url += dsn.path
}
url += fmt.Sprintf("/%s", dsn.projectID)
return url
}
// StoreAPIURL returns the URL of the store endpoint of the project associated
// with the DSN.
func (dsn Dsn) StoreAPIURL() *url.URL {
return dsn.getAPIURL("store")
}
// EnvelopeAPIURL returns the URL of the envelope endpoint of the project
// associated with the DSN.
func (dsn Dsn) EnvelopeAPIURL() *url.URL {
return dsn.getAPIURL("envelope")
}
func (dsn Dsn) getAPIURL(s string) *url.URL {
var rawURL string
rawURL += fmt.Sprintf("%s://%s", dsn.scheme, dsn.host)
if dsn.port != dsn.scheme.defaultPort() {
rawURL += fmt.Sprintf(":%d", dsn.port)
}
if dsn.path != "" {
rawURL += dsn.path
}
rawURL += fmt.Sprintf("/api/%s/%s/", dsn.projectID, s)
parsedURL, _ := url.Parse(rawURL)
return parsedURL
}
// RequestHeaders returns all the necessary headers that have to be used in the transport.
// RequestHeaders returns all the necessary headers that have to be used in the transport when sending events
// to the /store endpoint.
//
// Deprecated: This method shall only be used if you want to implement your own transport that sends events to
// the /store endpoint. If you're using the transport provided by the SDK, all necessary headers to authenticate
// against the /envelope endpoint are added automatically.
func (dsn Dsn) RequestHeaders() map[string]string {
auth := fmt.Sprintf("Sentry sentry_version=%s, sentry_timestamp=%d, "+
"sentry_client=sentry.go/%s, sentry_key=%s", apiVersion, time.Now().Unix(), Version, dsn.publicKey)
if dsn.secretKey != "" {
auth = fmt.Sprintf("%s, sentry_secret=%s", auth, dsn.secretKey)
}
return map[string]string{
"Content-Type": "application/json",
"X-Sentry-Auth": auth,
}
}
// MarshalJSON converts the Dsn struct to JSON.
func (dsn Dsn) MarshalJSON() ([]byte, error) {
return json.Marshal(dsn.String())
}
// UnmarshalJSON converts JSON data to the Dsn struct.
func (dsn *Dsn) UnmarshalJSON(data []byte) error {
var str string
_ = json.Unmarshal(data, &str)
newDsn, err := NewDsn(str)
if err != nil {
return err
}
*dsn = *newDsn
return nil
return dsn.Dsn.RequestHeaders(SDKVersion)
}

View File

@ -33,17 +33,24 @@ func DynamicSamplingContextFromHeader(header []byte) (DynamicSamplingContext, er
return DynamicSamplingContext{
Entries: entries,
Frozen: true,
// If there's at least one Sentry value, we consider the DSC frozen
Frozen: len(entries) > 0,
}, nil
}
func DynamicSamplingContextFromTransaction(span *Span) DynamicSamplingContext {
entries := map[string]string{}
hub := hubFromContext(span.Context())
scope := hub.Scope()
client := hub.Client()
options := client.Options()
if client == nil || scope == nil {
return DynamicSamplingContext{
Entries: map[string]string{},
Frozen: false,
}
}
entries := make(map[string]string)
if traceID := span.TraceID.String(); traceID != "" {
entries["trace_id"] = traceID
@ -53,32 +60,27 @@ func DynamicSamplingContextFromTransaction(span *Span) DynamicSamplingContext {
}
if dsn := client.dsn; dsn != nil {
if publicKey := dsn.publicKey; publicKey != "" {
if publicKey := dsn.GetPublicKey(); publicKey != "" {
entries["public_key"] = publicKey
}
}
if release := options.Release; release != "" {
if release := client.options.Release; release != "" {
entries["release"] = release
}
if environment := options.Environment; environment != "" {
if environment := client.options.Environment; environment != "" {
entries["environment"] = environment
}
// Only include the transaction name if it's of good quality (not empty and not SourceURL)
if span.Source != "" && span.Source != SourceURL {
if transactionName := scope.Transaction(); transactionName != "" {
entries["transaction"] = transactionName
if span.IsTransaction() {
entries["transaction"] = span.Name
}
}
if userSegment := scope.user.Segment; userSegment != "" {
entries["user_segment"] = userSegment
}
entries["sampled"] = strconv.FormatBool(span.Sampled.Bool())
return DynamicSamplingContext{
Entries: entries,
Frozen: true,
}
return DynamicSamplingContext{Entries: entries, Frozen: true}
}
func (d DynamicSamplingContext) HasEntries() bool {
@ -98,13 +100,55 @@ func (d DynamicSamplingContext) String() string {
}
members = append(members, member)
}
if len(members) > 0 {
baggage, err := baggage.New(members...)
if err != nil {
return ""
}
return baggage.String()
if len(members) == 0 {
return ""
}
return ""
baggage, err := baggage.New(members...)
if err != nil {
return ""
}
return baggage.String()
}
// DynamicSamplingContextFromScope constructs a frozen DynamicSamplingContext
// from the given scope and client. Accessing scope fields here is not thread
// safe, so this function should only be called from within scope methods.
func DynamicSamplingContextFromScope(scope *Scope, client *Client) DynamicSamplingContext {
	// Without both a scope and a client there is nothing to derive from.
	if scope == nil || client == nil {
		return DynamicSamplingContext{Entries: map[string]string{}, Frozen: false}
	}

	entries := map[string]string{}

	pc := scope.propagationContext
	if id := pc.TraceID.String(); id != "" {
		entries["trace_id"] = id
	}
	if rate := client.options.TracesSampleRate; rate != 0 {
		entries["sample_rate"] = strconv.FormatFloat(rate, 'f', -1, 64)
	}
	if client.dsn != nil {
		if key := client.dsn.GetPublicKey(); key != "" {
			entries["public_key"] = key
		}
	}
	if rel := client.options.Release; rel != "" {
		entries["release"] = rel
	}
	if env := client.options.Environment; env != "" {
		entries["environment"] = env
	}

	return DynamicSamplingContext{Entries: entries, Frozen: true}
}

129
vendor/github.com/getsentry/sentry-go/exception.go generated vendored Normal file
View File

@ -0,0 +1,129 @@
package sentry
import (
"fmt"
"reflect"
"slices"
)
// Mechanism metadata values attached to exceptions produced from error chains.
const (
	// MechanismTypeGeneric marks a standalone (root) error that has no parent.
	MechanismTypeGeneric string = "generic"
	// MechanismTypeChained marks an error reached by walking a chain from a parent.
	MechanismTypeChained string = "chained"
	// MechanismTypeUnwrap is the source label for errors found via Unwrap() error.
	MechanismTypeUnwrap string = "unwrap"
	// MechanismSourceCause is the source label for errors found via Cause() error.
	MechanismSourceCause string = "cause"
)
// visited tracks errors already emitted while walking an error chain, so that
// cycles and duplicates do not produce repeated exceptions.
type visited struct {
	ptrs map[uintptr]struct{} // identity of non-nil pointer-typed errors
	msgs map[string]struct{}  // reflect string + message key for everything else
}

// seenError reports whether err was encountered before, recording it as seen
// as a side effect. Non-nil pointer errors are deduplicated by address; all
// other errors by their reflect value string combined with their message.
func (v *visited) seenError(err error) bool {
	val := reflect.ValueOf(err)
	if val.Kind() == reflect.Ptr && !val.IsNil() {
		addr := val.Pointer()
		_, dup := v.ptrs[addr]
		if !dup {
			v.ptrs[addr] = struct{}{}
		}
		return dup
	}
	key := val.String() + err.Error()
	_, dup := v.msgs[key]
	if !dup {
		v.msgs[key] = struct{}{}
	}
	return dup
}
// convertErrorToExceptions flattens the error chain rooted at err into a slice
// of Exceptions, ordered so the most recent (outermost) error comes last, with
// unwrapping bounded by maxErrorDepth.
func convertErrorToExceptions(err error, maxErrorDepth int) []Exception {
	seen := &visited{
		ptrs: make(map[uintptr]struct{}),
		msgs: make(map[string]struct{}),
	}

	var chain []Exception
	convertErrorDFS(err, &chain, nil, "", seen, maxErrorDepth, 0)

	// Mechanism type only helps debugging chained errors; for a single,
	// unchained error we cannot tell who invoked captureException, so drop it.
	if len(chain) == 1 {
		chain[0].Mechanism = nil
	}

	// Sentry expects the outermost error last.
	slices.Reverse(chain)

	// Give the outermost error a trace of the current stack if it has none.
	// Only the most recent error gets one, to avoid duplication and because
	// the current stack is most likely unrelated to deeper errors.
	if n := len(chain); n > 0 && chain[n-1].Stacktrace == nil {
		chain[n-1].Stacktrace = NewStacktrace()
	}
	return chain
}
// convertErrorDFS walks the error chain rooted at err depth-first, appending
// one Exception per unique error to *exceptions in discovery order (outermost
// error first). parentID and source describe how err was reached from its
// parent; maxErrorDepth (when >= 0) bounds recursion depth; visited suppresses
// duplicates and cycles.
func convertErrorDFS(err error, exceptions *[]Exception, parentID *int, source string, visited *visited, maxErrorDepth int, currentDepth int) {
	if err == nil {
		return
	}
	// Stop on cycles or errors already emitted elsewhere in the chain.
	if visited.seenError(err) {
		return
	}
	// Errors exposing Unwrap() []error are multi-error groups.
	_, isExceptionGroup := err.(interface{ Unwrap() []error })
	exception := Exception{
		Value:      err.Error(),
		Type:       reflect.TypeOf(err).String(),
		Stacktrace: ExtractStacktrace(err),
	}
	// An exception's ID is its index in the output slice.
	currentID := len(*exceptions)
	var mechanismType string
	if parentID == nil {
		// Root of the chain: no parent, so the source label is meaningless.
		mechanismType = MechanismTypeGeneric
		source = ""
	} else {
		mechanismType = MechanismTypeChained
	}
	exception.Mechanism = &Mechanism{
		Type:             mechanismType,
		ExceptionID:      currentID,
		ParentID:         parentID,
		Source:           source,
		IsExceptionGroup: isExceptionGroup,
	}
	*exceptions = append(*exceptions, exception)
	// Respect the configured depth limit before descending further.
	if maxErrorDepth >= 0 && currentDepth >= maxErrorDepth {
		return
	}
	// Descend into wrapped errors. The multi-unwrap case must come first so
	// that error groups are not mistaken for single-wrap errors.
	switch v := err.(type) {
	case interface{ Unwrap() []error }:
		unwrapped := v.Unwrap()
		for i := range unwrapped {
			if unwrapped[i] != nil {
				childSource := fmt.Sprintf("errors[%d]", i)
				convertErrorDFS(unwrapped[i], exceptions, &currentID, childSource, visited, maxErrorDepth, currentDepth+1)
			}
		}
	case interface{ Unwrap() error }:
		unwrapped := v.Unwrap()
		if unwrapped != nil {
			convertErrorDFS(unwrapped, exceptions, &currentID, MechanismTypeUnwrap, visited, maxErrorDepth, currentDepth+1)
		}
	case interface{ Cause() error }:
		cause := v.Cause()
		if cause != nil {
			convertErrorDFS(cause, exceptions, &currentID, MechanismSourceCause, visited, maxErrorDepth, currentDepth+1)
		}
	}
}

View File

@ -2,8 +2,11 @@ package sentry
import (
"context"
"fmt"
"sync"
"time"
"github.com/getsentry/sentry-go/internal/debuglog"
)
type contextKey int
@ -17,14 +20,6 @@ const (
RequestContextKey = contextKey(2)
)
// defaultMaxBreadcrumbs is the default maximum number of breadcrumbs added to
// an event. Can be overwritten with the maxBreadcrumbs option.
const defaultMaxBreadcrumbs = 30
// maxBreadcrumbs is the absolute maximum number of breadcrumbs added to an
// event. The maxBreadcrumbs option cannot be set higher than this value.
const maxBreadcrumbs = 100
// currentHub is the initial Hub with no Client bound and an empty Scope.
var currentHub = NewHub(nil, NewScope())
@ -267,6 +262,18 @@ func (hub *Hub) CaptureException(exception error) *EventID {
return eventID
}
// CaptureCheckIn forwards the check-in to the currently bound Client together
// with the hub's top-level Scope. It returns the check-in's EventID when it
// was captured successfully, or nil otherwise (including when no Client is
// bound).
func (hub *Hub) CaptureCheckIn(checkIn *CheckIn, monitorConfig *MonitorConfig) *EventID {
	scope := hub.Scope()
	client := hub.Client()
	if client == nil {
		return nil
	}
	return client.CaptureCheckIn(checkIn, monitorConfig, scope)
}
// AddBreadcrumb records a new breadcrumb.
//
// The total number of breadcrumbs that can be recorded are limited by the
@ -276,33 +283,29 @@ func (hub *Hub) AddBreadcrumb(breadcrumb *Breadcrumb, hint *BreadcrumbHint) {
// If there's no client, just store it on the scope straight away
if client == nil {
hub.Scope().AddBreadcrumb(breadcrumb, maxBreadcrumbs)
hub.Scope().AddBreadcrumb(breadcrumb, defaultMaxBreadcrumbs)
return
}
options := client.Options()
max := options.MaxBreadcrumbs
if max < 0 {
limit := client.options.MaxBreadcrumbs
switch {
case limit < 0:
return
case limit == 0:
limit = defaultMaxBreadcrumbs
}
if options.BeforeBreadcrumb != nil {
if client.options.BeforeBreadcrumb != nil {
if hint == nil {
hint = &BreadcrumbHint{}
}
if breadcrumb = options.BeforeBreadcrumb(breadcrumb, hint); breadcrumb == nil {
Logger.Println("breadcrumb dropped due to BeforeBreadcrumb callback.")
if breadcrumb = client.options.BeforeBreadcrumb(breadcrumb, hint); breadcrumb == nil {
debuglog.Println("breadcrumb dropped due to BeforeBreadcrumb callback.")
return
}
}
if max == 0 {
max = defaultMaxBreadcrumbs
} else if max > maxBreadcrumbs {
max = maxBreadcrumbs
}
hub.Scope().AddBreadcrumb(breadcrumb, max)
hub.Scope().AddBreadcrumb(breadcrumb, limit)
}
// Recover calls the method of a same name on currently bound Client instance
@ -354,6 +357,67 @@ func (hub *Hub) Flush(timeout time.Duration) bool {
return client.Flush(timeout)
}
// FlushWithContext waits until the underlying Transport sends any buffered
// events to the Sentry server, blocking for at most the lifetime of ctx. It
// returns false when ctx is canceled first (or when no Client is bound); in
// that case some events may not be delivered.
//
// FlushWithContext should be called before terminating the program to ensure
// no events are unintentionally dropped.
//
// Avoid calling it indiscriminately after each CaptureEvent, CaptureException,
// or CaptureMessage; to send events synchronously over the network, configure
// HTTPSyncTransport during initialization with Init instead.
func (hub *Hub) FlushWithContext(ctx context.Context) bool {
	if client := hub.Client(); client != nil {
		return client.FlushWithContext(ctx)
	}
	return false
}
// GetTraceparent returns the current Sentry traceparent string, to be used as
// an HTTP header value or HTML meta tag value. It is context aware: the value
// is derived from the active span when one is set on the scope, otherwise
// from the scope's propagation context.
func (hub *Hub) GetTraceparent() string {
	scope := hub.Scope()
	if span := scope.span; span != nil {
		return span.ToSentryTrace()
	}
	pc := scope.propagationContext
	return fmt.Sprintf("%s-%s", pc.TraceID, pc.SpanID)
}
// GetTraceparentW3C returns the current traceparent string in W3C format,
// intended for propagation to downstream services that expect the W3C header.
// Without an active span it synthesizes a "00-…-…-00" value from the scope's
// propagation context.
func (hub *Hub) GetTraceparentW3C() string {
	scope := hub.Scope()
	if span := scope.span; span != nil {
		return span.ToTraceparent()
	}
	pc := scope.propagationContext
	return fmt.Sprintf("00-%s-%s-00", pc.TraceID, pc.SpanID)
}
// GetBaggage returns the current Sentry baggage string, to be used as an HTTP
// header value or HTML meta tag value. It is context aware: the value comes
// from the active span when one is set on the scope, otherwise from the
// scope's propagation context.
func (hub *Hub) GetBaggage() string {
	scope := hub.Scope()
	if span := scope.span; span != nil {
		return span.ToBaggage()
	}
	return scope.propagationContext.DynamicSamplingContext.String()
}
// HasHubOnContext checks whether Hub instance is bound to a given Context struct.
func HasHubOnContext(ctx context.Context) bool {
_, ok := ctx.Value(HubContextKey).(*Hub)

View File

@ -2,11 +2,14 @@ package sentry
import (
"fmt"
"os"
"regexp"
"runtime"
"runtime/debug"
"strings"
"sync"
"github.com/getsentry/sentry-go/internal/debuglog"
)
// ================================
@ -26,12 +29,12 @@ func (mi *modulesIntegration) SetupOnce(client *Client) {
client.AddEventProcessor(mi.processor)
}
func (mi *modulesIntegration) processor(event *Event, hint *EventHint) *Event {
func (mi *modulesIntegration) processor(event *Event, _ *EventHint) *Event {
if len(event.Modules) == 0 {
mi.once.Do(func() {
info, ok := debug.ReadBuildInfo()
if !ok {
Logger.Print("The Modules integration is not available in binaries built without module support.")
debuglog.Print("The Modules integration is not available in binaries built without module support.")
return
}
mi.modules = extractModules(info)
@ -69,7 +72,7 @@ func (ei *environmentIntegration) SetupOnce(client *Client) {
client.AddEventProcessor(ei.processor)
}
func (ei *environmentIntegration) processor(event *Event, hint *EventHint) *Event {
func (ei *environmentIntegration) processor(event *Event, _ *EventHint) *Event {
// Initialize maps as necessary.
contextNames := []string{"device", "os", "runtime"}
if event.Contexts == nil {
@ -130,17 +133,17 @@ func (iei *ignoreErrorsIntegration) Name() string {
}
func (iei *ignoreErrorsIntegration) SetupOnce(client *Client) {
iei.ignoreErrors = transformStringsIntoRegexps(client.Options().IgnoreErrors)
iei.ignoreErrors = transformStringsIntoRegexps(client.options.IgnoreErrors)
client.AddEventProcessor(iei.processor)
}
func (iei *ignoreErrorsIntegration) processor(event *Event, hint *EventHint) *Event {
func (iei *ignoreErrorsIntegration) processor(event *Event, _ *EventHint) *Event {
suspects := getIgnoreErrorsSuspects(event)
for _, suspect := range suspects {
for _, pattern := range iei.ignoreErrors {
if pattern.Match([]byte(suspect)) {
Logger.Printf("Event dropped due to being matched by `IgnoreErrors` option."+
if pattern.Match([]byte(suspect)) || strings.Contains(suspect, pattern.String()) {
debuglog.Printf("Event dropped due to being matched by `IgnoreErrors` option."+
"| Value matched: %s | Filter used: %s", suspect, pattern)
return nil
}
@ -177,6 +180,40 @@ func getIgnoreErrorsSuspects(event *Event) []string {
return suspects
}
// ================================
// Ignore Transactions Integration
// ================================

// ignoreTransactionsIntegration drops transactions whose name matches any of
// the patterns configured via the IgnoreTransactions option.
type ignoreTransactionsIntegration struct {
	ignoreTransactions []*regexp.Regexp
}

// Name identifies this integration.
func (iei *ignoreTransactionsIntegration) Name() string {
	return "IgnoreTransactions"
}

// SetupOnce compiles the configured patterns and registers the filter as an
// event processor on the client.
func (iei *ignoreTransactionsIntegration) SetupOnce(client *Client) {
	iei.ignoreTransactions = transformStringsIntoRegexps(client.options.IgnoreTransactions)
	client.AddEventProcessor(iei.processor)
}

// processor drops the event when its transaction name matches a configured
// pattern, either as a regexp or as a literal substring.
func (iei *ignoreTransactionsIntegration) processor(event *Event, _ *EventHint) *Event {
	name := event.Transaction
	if name == "" {
		return event
	}
	for _, pattern := range iei.ignoreTransactions {
		if !pattern.MatchString(name) && !strings.Contains(name, pattern.String()) {
			continue
		}
		debuglog.Printf("Transaction dropped due to being matched by `IgnoreTransactions` option."+
			"| Value matched: %s | Filter used: %s", name, pattern)
		return nil
	}
	return event
}
// ================================
// Contextify Frames Integration
// ================================
@ -198,7 +235,7 @@ func (cfi *contextifyFramesIntegration) SetupOnce(client *Client) {
client.AddEventProcessor(cfi.processor)
}
func (cfi *contextifyFramesIntegration) processor(event *Event, hint *EventHint) *Event {
func (cfi *contextifyFramesIntegration) processor(event *Event, _ *EventHint) *Event {
// Range over all exceptions
for _, ex := range event.Exception {
// If it has no stacktrace, just bail out
@ -291,3 +328,66 @@ func (cfi *contextifyFramesIntegration) addContextLinesToFrame(frame Frame, line
}
return frame
}
// ================================
// Global Tags Integration
// ================================

// envTagsPrefix is the environment variable prefix recognized by loadEnvTags.
const envTagsPrefix = "SENTRY_TAGS_"

// globalTagsIntegration stamps every event with tags taken from the client
// options and from SENTRY_TAGS_* environment variables.
type globalTagsIntegration struct {
	tags    map[string]string
	envTags map[string]string
}

// Name identifies this integration.
func (ti *globalTagsIntegration) Name() string {
	return "GlobalTags"
}

// SetupOnce snapshots the option tags, loads environment tags, and registers
// the tagging event processor on the client.
func (ti *globalTagsIntegration) SetupOnce(client *Client) {
	optTags := client.options.Tags
	ti.tags = make(map[string]string, len(optTags))
	for key, value := range optTags {
		ti.tags[key] = value
	}

	ti.envTags = loadEnvTags()

	client.AddEventProcessor(ti.processor)
}

// processor copies option tags, then environment tags, onto the event without
// overwriting tags the event already carries.
func (ti *globalTagsIntegration) processor(event *Event, _ *EventHint) *Event {
	total := len(ti.tags) + len(ti.envTags)
	if total == 0 {
		return event
	}

	if event.Tags == nil {
		event.Tags = make(map[string]string, total)
	}

	for _, source := range []map[string]string{ti.tags, ti.envTags} {
		for key, value := range source {
			if _, exists := event.Tags[key]; !exists {
				event.Tags[key] = value
			}
		}
	}

	return event
}
// loadEnvTags collects tags from the process environment: every variable of
// the form SENTRY_TAGS_<NAME>=<value> becomes the tag <NAME> with <value>.
//
// Bug fix: the previous implementation used strings.Split(pair, "=") and took
// parts[1], which truncated any value containing '=' (e.g.
// SENTRY_TAGS_FOO=a=b yielded "a" instead of "a=b"). strings.Cut splits on
// the first '=' only, preserving the full value.
func loadEnvTags() map[string]string {
	tags := map[string]string{}
	for _, pair := range os.Environ() {
		// os.Environ entries are "key=value"; cut at the first '=' only.
		key, value, _ := strings.Cut(pair, "=")
		if !strings.HasPrefix(key, envTagsPrefix) {
			continue
		}
		tags[strings.TrimPrefix(key, envTagsPrefix)] = value
	}
	return tags
}

View File

@ -8,13 +8,33 @@ import (
"net/http"
"strings"
"time"
"github.com/getsentry/sentry-go/attribute"
"github.com/getsentry/sentry-go/internal/debuglog"
"github.com/getsentry/sentry-go/internal/protocol"
"github.com/getsentry/sentry-go/internal/ratelimit"
)
// Protocol Docs (kinda)
// https://github.com/getsentry/rust-sentry-types/blob/master/src/protocol/v7.rs
// Event.Type discriminators for the payload kinds the SDK emits.
const errorType = ""
const eventType = "event"
const transactionType = "transaction"
const checkInType = "check_in"

// logEvent holds the item type and content type used when sending log items
// (note the "application/vnd.sentry.items.log+json" content type).
var logEvent = struct {
	Type        string
	ContentType string
}{
	"log",
	"application/vnd.sentry.items.log+json",
}

// traceMetricEvent holds the item type and content type used when sending
// trace metric items.
var traceMetricEvent = struct {
	Type        string
	ContentType string
}{
	"trace_metric",
	"application/vnd.sentry.items.trace-metric+json",
}
// Level marks the severity of the event.
type Level string
@ -28,28 +48,9 @@ const (
LevelFatal Level = "fatal"
)
// getSensitiveHeaders returns the set of HTTP header names treated as
// sensitive (credentials and client-address headers), keyed for O(1) lookup.
func getSensitiveHeaders() map[string]bool {
	names := []string{"Authorization", "Cookie", "X-Forwarded-For", "X-Real-Ip"}
	sensitive := make(map[string]bool, len(names))
	for _, name := range names {
		sensitive[name] = true
	}
	return sensitive
}
// SdkInfo contains all metadata about about the SDK being used.
type SdkInfo struct {
Name string `json:"name,omitempty"`
Version string `json:"version,omitempty"`
Integrations []string `json:"integrations,omitempty"`
Packages []SdkPackage `json:"packages,omitempty"`
}
// SdkPackage describes a package that was installed.
type SdkPackage struct {
Name string `json:"name,omitempty"`
Version string `json:"version,omitempty"`
}
// SdkInfo contains all metadata about the SDK. It is an alias re-exported
// from the internal protocol package to keep the public API stable.
type SdkInfo = protocol.SdkInfo

// SdkPackage describes a package that was installed; alias re-exported from
// the internal protocol package.
type SdkPackage = protocol.SdkPackage
// TODO: This type could be more useful, as map of interface{} is too generic
// and requires a lot of type assertions in beforeBreadcrumb calls
@ -66,37 +67,126 @@ type Breadcrumb struct {
Message string `json:"message,omitempty"`
Data map[string]interface{} `json:"data,omitempty"`
Level Level `json:"level,omitempty"`
Timestamp time.Time `json:"timestamp"`
Timestamp time.Time `json:"timestamp,omitzero"`
}
// TODO: provide constants for known breadcrumb types.
// See https://develop.sentry.dev/sdk/event-payloads/breadcrumbs/#breadcrumb-types.
// MarshalJSON converts the Breadcrumb struct to JSON.
func (b *Breadcrumb) MarshalJSON() ([]byte, error) {
// We want to omit time.Time zero values, otherwise the server will try to
// interpret dates too far in the past. However, encoding/json doesn't
// support the "omitempty" option for struct types. See
// https://golang.org/issues/11939.
//
// We overcome the limitation and achieve what we want by shadowing fields
// and a few type tricks.
// Logger provides a chaining API for structured logging to Sentry.
type Logger interface {
// Write implements the io.Writer interface. Currently, the [sentry.Hub] is
// context aware, in order to get the correct trace correlation. Using this
// might result in incorrect span association on logs. If you need to use
// Write it is recommended to create a NewLogger so that the associated context
// is passed correctly.
Write(p []byte) (n int, err error)
// breadcrumb aliases Breadcrumb to allow calling json.Marshal without an
// infinite loop. It preserves all fields while none of the attached
// methods.
type breadcrumb Breadcrumb
// SetAttributes allows attaching parameters to the logger using the attribute API.
// These attributes will be included in all subsequent log entries.
SetAttributes(...attribute.Builder)
if b.Timestamp.IsZero() {
return json.Marshal(struct {
// Embed all of the fields of Breadcrumb.
*breadcrumb
// Timestamp shadows the original Timestamp field and is meant to
// remain nil, triggering the omitempty behavior.
Timestamp json.RawMessage `json:"timestamp,omitempty"`
}{breadcrumb: (*breadcrumb)(b)})
// Trace defines the [sentry.LogLevel] for the log entry.
Trace() LogEntry
// Debug defines the [sentry.LogLevel] for the log entry.
Debug() LogEntry
// Info defines the [sentry.LogLevel] for the log entry.
Info() LogEntry
// Warn defines the [sentry.LogLevel] for the log entry.
Warn() LogEntry
// Error defines the [sentry.LogLevel] for the log entry.
Error() LogEntry
// Fatal defines the [sentry.LogLevel] for the log entry.
Fatal() LogEntry
// Panic defines the [sentry.LogLevel] for the log entry.
Panic() LogEntry
// LFatal defines the [sentry.LogLevel] for the log entry. This only sets
// the level to fatal, but does not panic or exit.
LFatal() LogEntry
// GetCtx returns the [context.Context] set on the logger.
GetCtx() context.Context
}
// LogEntry defines the interface for a log entry that supports chaining
// attributes. Each attribute method returns the entry, so calls can be
// chained and terminated with Emit or Emitf.
type LogEntry interface {
	// WithCtx creates a new LogEntry with the specified context without overwriting the previous one.
	WithCtx(ctx context.Context) LogEntry
	// String adds a string attribute to the LogEntry.
	String(key, value string) LogEntry
	// Int adds an int attribute to the LogEntry.
	Int(key string, value int) LogEntry
	// Int64 adds an int64 attribute to the LogEntry.
	Int64(key string, value int64) LogEntry
	// Float64 adds a float64 attribute to the LogEntry.
	Float64(key string, value float64) LogEntry
	// Bool adds a bool attribute to the LogEntry.
	Bool(key string, value bool) LogEntry
	// Emit emits the LogEntry with the provided arguments.
	Emit(args ...interface{})
	// Emitf emits the LogEntry using a format string and arguments.
	Emitf(format string, args ...interface{})
}
// Meter provides an interface for recording metrics
// (counters, gauges, and distributions).
type Meter interface {
	// WithCtx returns a new Meter that uses the given context for trace/span association.
	WithCtx(ctx context.Context) Meter
	// SetAttributes allows attaching parameters to the meter using the attribute API.
	// These attributes will be included in all subsequent metrics.
	SetAttributes(attrs ...attribute.Builder)
	// Count records a count metric.
	Count(name string, count int64, opts ...MeterOption)
	// Gauge records a gauge metric.
	Gauge(name string, value float64, opts ...MeterOption)
	// Distribution records a distribution metric.
	Distribution(name string, sample float64, opts ...MeterOption)
}
// MeterOption configures a metric recording call.
type MeterOption func(*meterOptions)

// meterOptions accumulates the settings applied by MeterOption functions.
type meterOptions struct {
	unit       string                     // measurement unit, e.g. "millisecond", "byte"
	scope      *Scope                     // optional scope overriding the hub's default
	attributes map[string]attribute.Value // extra attributes attached to the metric
}
// WithUnit sets the unit for the metric (e.g., "millisecond", "byte").
func WithUnit(unit string) MeterOption {
	return func(o *meterOptions) {
		o.unit = unit
	}
}
// WithScopeOverride sets a custom scope for the metric, overriding the
// default scope from the hub.
func WithScopeOverride(scope *Scope) MeterOption {
	return func(opts *meterOptions) {
		opts.scope = scope
	}
}
// WithAttributes sets attributes for the metric. Attribute values of type
// attribute.INVALID are logged and skipped; on duplicate keys the later
// value wins.
func WithAttributes(attrs ...attribute.Builder) MeterOption {
	return func(opts *meterOptions) {
		if opts.attributes == nil {
			opts.attributes = make(map[string]attribute.Value, len(attrs))
		}
		for _, builder := range attrs {
			if builder.Value.Type() != attribute.INVALID {
				opts.attributes[builder.Key] = builder.Value
				continue
			}
			debuglog.Printf("invalid attribute: %v", builder)
		}
	}
}
// Attachment allows associating files with your events to aid in investigation.
// An event may contain one or more attachments.
type Attachment struct {
	// Filename is the name reported for the attached file.
	Filename string
	// ContentType is the MIME type of Payload.
	ContentType string
	// Payload is the raw attachment content.
	Payload []byte
}
// User describes the user associated with an Event. If this is used, at least
@ -107,32 +197,27 @@ type User struct {
IPAddress string `json:"ip_address,omitempty"`
Username string `json:"username,omitempty"`
Name string `json:"name,omitempty"`
Segment string `json:"segment,omitempty"`
Data map[string]string `json:"data,omitempty"`
}
func (u User) IsEmpty() bool {
if len(u.ID) > 0 {
if u.ID != "" {
return false
}
if len(u.Email) > 0 {
if u.Email != "" {
return false
}
if len(u.IPAddress) > 0 {
if u.IPAddress != "" {
return false
}
if len(u.Username) > 0 {
if u.Username != "" {
return false
}
if len(u.Name) > 0 {
return false
}
if len(u.Segment) > 0 {
if u.Name != "" {
return false
}
@ -154,39 +239,70 @@ type Request struct {
Env map[string]string `json:"env,omitempty"`
}
// sensitiveHeaders lists HTTP header names considered sensitive (credentials,
// session identifiers, CSRF tokens, client addresses). Headers in this set
// are filtered out of captured requests when SendDefaultPII is disabled.
// Most entries use canonical MIME casing; the lowercase/underscore variants
// cover common framework conventions.
var sensitiveHeaders = map[string]struct{}{
	"_csrf":               {},
	"_csrf_token":         {},
	"_session":            {},
	"_xsrf":               {},
	"Api-Key":             {},
	"Apikey":              {},
	"Auth":                {},
	"Authorization":       {},
	"Cookie":              {},
	"Credentials":         {},
	"Csrf":                {},
	"Csrf-Token":          {},
	"Csrftoken":           {},
	"Ip-Address":          {},
	"Passwd":              {},
	"Password":            {},
	"Private-Key":         {},
	"Privatekey":          {},
	"Proxy-Authorization": {},
	"Remote-Addr":         {},
	"Secret":              {},
	"Session":             {},
	"Sessionid":           {},
	"Token":               {},
	"User-Session":        {},
	"X-Api-Key":           {},
	"X-Csrftoken":         {},
	"X-Forwarded-For":     {},
	"X-Real-Ip":           {},
	"XSRF-TOKEN":          {},
}
// NewRequest returns a new Sentry Request from the given http.Request.
//
// NewRequest avoids operations that depend on network access. In particular, it
// does not read r.Body.
func NewRequest(r *http.Request) *Request {
protocol := schemeHTTP
prot := protocol.SchemeHTTP
if r.TLS != nil || r.Header.Get("X-Forwarded-Proto") == "https" {
protocol = schemeHTTPS
prot = protocol.SchemeHTTPS
}
url := fmt.Sprintf("%s://%s%s", protocol, r.Host, r.URL.Path)
url := fmt.Sprintf("%s://%s%s", prot, r.Host, r.URL.Path)
var cookies string
var env map[string]string
headers := map[string]string{}
if client := CurrentHub().Client(); client != nil {
if client.Options().SendDefaultPII {
// We read only the first Cookie header because of the specification:
// https://tools.ietf.org/html/rfc6265#section-5.4
// When the user agent generates an HTTP request, the user agent MUST NOT
// attach more than one Cookie header field.
cookies = r.Header.Get("Cookie")
if client := CurrentHub().Client(); client != nil && client.options.SendDefaultPII {
// We read only the first Cookie header because of the specification:
// https://tools.ietf.org/html/rfc6265#section-5.4
// When the user agent generates an HTTP request, the user agent MUST NOT
// attach more than one Cookie header field.
cookies = r.Header.Get("Cookie")
for k, v := range r.Header {
headers[k] = strings.Join(v, ",")
}
headers = make(map[string]string, len(r.Header))
for k, v := range r.Header {
headers[k] = strings.Join(v, ",")
}
if addr, port, err := net.SplitHostPort(r.RemoteAddr); err == nil {
env = map[string]string{"REMOTE_ADDR": addr, "REMOTE_PORT": port}
}
if addr, port, err := net.SplitHostPort(r.RemoteAddr); err == nil {
env = map[string]string{"REMOTE_ADDR": addr, "REMOTE_PORT": port}
}
} else {
sensitiveHeaders := getSensitiveHeaders()
for k, v := range r.Header {
if _, ok := sensitiveHeaders[k]; !ok {
headers[k] = strings.Join(v, ",")
@ -206,13 +322,33 @@ func NewRequest(r *http.Request) *Request {
}
}
// Mechanism is the mechanism by which an exception was generated and handled.
type Mechanism struct {
	Type        string `json:"type"`
	Description string `json:"description,omitempty"`
	HelpLink    string `json:"help_link,omitempty"`
	Source      string `json:"source,omitempty"`
	// Handled is tri-state: nil (unknown), true, or false (see SetUnhandled).
	Handled          *bool          `json:"handled,omitempty"`
	ParentID         *int           `json:"parent_id,omitempty"`
	ExceptionID      int            `json:"exception_id"`
	IsExceptionGroup bool           `json:"is_exception_group,omitempty"`
	Data             map[string]any `json:"data,omitempty"`
}
// SetUnhandled indicates that the exception is an unhandled exception, i.e.
// from a panic. It records an explicit false in the tri-state Handled field.
func (m *Mechanism) SetUnhandled() {
	handled := false
	m.Handled = &handled
}
// Exception specifies an error that occurred.
type Exception struct {
Type string `json:"type,omitempty"` // used as the main issue title
Value string `json:"value,omitempty"` // used as the main issue subtitle
Module string `json:"module,omitempty"`
ThreadID string `json:"thread_id,omitempty"`
ThreadID uint64 `json:"thread_id,omitempty"`
Stacktrace *Stacktrace `json:"stacktrace,omitempty"`
Mechanism *Mechanism `json:"mechanism,omitempty"`
}
// SDKMetaData is a struct to stash data which is needed at some point in the SDK's event processing pipeline
@ -226,6 +362,34 @@ type TransactionInfo struct {
Source TransactionSource `json:"source,omitempty"`
}
// The DebugMeta interface is not used in Golang apps, but may be populated
// when proxying Events from other platforms, like iOS, Android, and the
// Web. (See: https://develop.sentry.dev/sdk/event-payloads/debugmeta/ ).
type DebugMeta struct {
	// SdkInfo identifies the originating platform SDK, if known.
	SdkInfo *DebugMetaSdkInfo `json:"sdk_info,omitempty"`
	// Images lists the loaded binary/debug images of the originating app.
	Images []DebugMetaImage `json:"images,omitempty"`
}
// DebugMetaSdkInfo identifies the SDK that produced the debug metadata,
// with its semantic version split into separate fields.
type DebugMetaSdkInfo struct {
	SdkName           string `json:"sdk_name,omitempty"`
	VersionMajor      int    `json:"version_major,omitempty"`
	VersionMinor      int    `json:"version_minor,omitempty"`
	VersionPatchlevel int    `json:"version_patchlevel,omitempty"`
}
// DebugMetaImage describes one binary or debug image. The trailing comment
// on each field lists the image formats for which the field applies.
type DebugMetaImage struct {
	Type        string `json:"type,omitempty"`         // all
	ImageAddr   string `json:"image_addr,omitempty"`   // macho,elf,pe
	ImageSize   int    `json:"image_size,omitempty"`   // macho,elf,pe
	DebugID     string `json:"debug_id,omitempty"`     // macho,elf,pe,wasm,sourcemap
	DebugFile   string `json:"debug_file,omitempty"`   // macho,elf,pe,wasm
	CodeID      string `json:"code_id,omitempty"`      // macho,elf,pe,wasm
	CodeFile    string `json:"code_file,omitempty"`    // macho,elf,pe,wasm,sourcemap
	ImageVmaddr string `json:"image_vmaddr,omitempty"` // macho,elf,pe
	Arch        string `json:"arch,omitempty"`         // macho,elf,pe
	UUID        string `json:"uuid,omitempty"`         // proguard
}
// EventID is a hexadecimal string representing a unique uuid4 for an Event.
// An EventID must be 32 characters long, lowercase and not have any dashes,
// e.g. "fc6d8c0c43fc4630ad850ee518f1b9d0".
type EventID string
@ -249,26 +413,123 @@ type Event struct {
ServerName string `json:"server_name,omitempty"`
Threads []Thread `json:"threads,omitempty"`
Tags map[string]string `json:"tags,omitempty"`
Timestamp time.Time `json:"timestamp"`
Timestamp time.Time `json:"timestamp,omitzero"`
Transaction string `json:"transaction,omitempty"`
User User `json:"user,omitempty"`
Logger string `json:"logger,omitempty"`
Modules map[string]string `json:"modules,omitempty"`
Request *Request `json:"request,omitempty"`
Exception []Exception `json:"exception,omitempty"`
DebugMeta *DebugMeta `json:"debug_meta,omitempty"`
Attachments []*Attachment `json:"-"`
// The fields below are only relevant for transactions.
Type string `json:"type,omitempty"`
StartTime time.Time `json:"start_timestamp"`
StartTime time.Time `json:"start_timestamp,omitzero"`
Spans []*Span `json:"spans,omitempty"`
TransactionInfo *TransactionInfo `json:"transaction_info,omitempty"`
// The fields below are only relevant for crons/check ins
CheckIn *CheckIn `json:"check_in,omitempty"`
MonitorConfig *MonitorConfig `json:"monitor_config,omitempty"`
// The fields below are only relevant for logs
Logs []Log `json:"-"`
// The fields below are only relevant for metrics
Metrics []Metric `json:"-"`
// The fields below are not part of the final JSON payload.
sdkMetaData SDKMetaData
}
// SetException appends the unwrapped errors to the event's exception list.
//
// maxErrorDepth is the maximum depth of the error chain we will look
// into while unwrapping the errors. If maxErrorDepth is -1, we will
// unwrap all errors in the chain.
//
// A nil error, or an error that yields no exceptions, leaves the event
// untouched.
func (e *Event) SetException(exception error, maxErrorDepth int) {
	if exception == nil {
		return
	}
	if excs := convertErrorToExceptions(exception, maxErrorDepth); len(excs) > 0 {
		e.Exception = excs
	}
}
// ToEnvelopeItem converts the Event to a Sentry envelope item.
//
// If the event cannot be marshaled to JSON, it retries once after dropping
// Breadcrumbs, Contexts and Extra (recording the failure in Extra["info"]);
// only if that also fails is an error returned. NOTE: this fallback mutates
// the event in place. The envelope item type is chosen from e.Type
// (transaction, check-in, log batch, metric batch, or a plain event).
func (e *Event) ToEnvelopeItem() (*protocol.EnvelopeItem, error) {
	eventBody, err := json.Marshal(e)
	if err != nil {
		// Try fallback: remove problematic fields and retry
		e.Breadcrumbs = nil
		e.Contexts = nil
		e.Extra = map[string]interface{}{
			"info": fmt.Sprintf("Could not encode original event as JSON. "+
				"Succeeded by removing Breadcrumbs, Contexts and Extra. "+
				"Please verify the data you attach to the scope. "+
				"Error: %s", err),
		}
		eventBody, err = json.Marshal(e)
		if err != nil {
			return nil, fmt.Errorf("event could not be marshaled even with fallback: %w", err)
		}
		DebugLogger.Printf("Event marshaling succeeded with fallback after removing problematic fields")
	}
	// TODO: all event types should be abstracted to implement EnvelopeItemConvertible and convert themselves.
	var item *protocol.EnvelopeItem
	switch e.Type {
	case transactionType:
		item = protocol.NewEnvelopeItem(protocol.EnvelopeItemTypeTransaction, eventBody)
	case checkInType:
		item = protocol.NewEnvelopeItem(protocol.EnvelopeItemTypeCheckIn, eventBody)
	case logEvent.Type:
		// Log batches carry an item count in the envelope item header.
		item = protocol.NewLogItem(len(e.Logs), eventBody)
	case traceMetricEvent.Type:
		// Metric batches carry an item count in the envelope item header.
		item = protocol.NewTraceMetricItem(len(e.Metrics), eventBody)
	default:
		item = protocol.NewEnvelopeItem(protocol.EnvelopeItemTypeEvent, eventBody)
	}
	return item, nil
}
// GetCategory returns the rate limit category for this event,
// derived from e.Type.
func (e *Event) GetCategory() ratelimit.Category {
	return e.toCategory()
}

// GetEventID returns the event ID as a plain string.
func (e *Event) GetEventID() string {
	return string(e.EventID)
}

// GetSdkInfo returns SDK information for the envelope header.
func (e *Event) GetSdkInfo() *protocol.SdkInfo {
	return &e.Sdk
}
// GetDynamicSamplingContext returns trace context for the envelope header.
// The result is never nil; it is empty when no DSC entries are present.
func (e *Event) GetDynamicSamplingContext() map[string]string {
	out := make(map[string]string)
	dsc := e.sdkMetaData.dsc
	if !dsc.HasEntries() {
		return out
	}
	for key, value := range dsc.Entries {
		out[key] = value
	}
	return out
}
// TODO: Event.Contexts map[string]interface{} => map[string]EventContext,
// to prevent accidentally storing T when we mean *T.
// For example, the TraceContext must be stored as *TraceContext to pick up the
@ -277,15 +538,8 @@ type Event struct {
// MarshalJSON converts the Event struct to JSON.
func (e *Event) MarshalJSON() ([]byte, error) {
// We want to omit time.Time zero values, otherwise the server will try to
// interpret dates too far in the past. However, encoding/json doesn't
// support the "omitempty" option for struct types. See
// https://golang.org/issues/11939.
//
// We overcome the limitation and achieve what we want by shadowing fields
// and a few type tricks.
if e.Type == transactionType {
return e.transactionMarshalJSON()
if e.Type == checkInType {
return e.checkInMarshalJSON()
}
return e.defaultMarshalJSON()
}
@ -295,15 +549,33 @@ func (e *Event) defaultMarshalJSON() ([]byte, error) {
// loop. It preserves all fields while none of the attached methods.
type event Event
if e.Type == transactionType {
return json.Marshal(struct{ *event }{(*event)(e)})
}
// metrics and logs should be serialized under the same `items` json field.
if e.Type == logEvent.Type {
type logEvent struct {
*event
Items []Log `json:"items,omitempty"`
Type json.RawMessage `json:"type,omitempty"`
}
return json.Marshal(logEvent{event: (*event)(e), Items: e.Logs})
}
if e.Type == traceMetricEvent.Type {
type metricEvent struct {
*event
Items []Metric `json:"items,omitempty"`
Type json.RawMessage `json:"type,omitempty"`
}
return json.Marshal(metricEvent{event: (*event)(e), Items: e.Metrics})
}
// errorEvent is like Event with shadowed fields for customizing JSON
// marshaling.
type errorEvent struct {
*event
// Timestamp shadows the original Timestamp field. It allows us to
// include the timestamp when non-zero and omit it otherwise.
Timestamp json.RawMessage `json:"timestamp,omitempty"`
// The fields below are not part of error events and only make sense to
// be sent for transactions. They shadow the respective fields in Event
// and are meant to remain nil, triggering the omitempty behavior.
@ -315,60 +587,59 @@ func (e *Event) defaultMarshalJSON() ([]byte, error) {
}
x := errorEvent{event: (*event)(e)}
if !e.Timestamp.IsZero() {
b, err := e.Timestamp.MarshalJSON()
if err != nil {
return nil, err
}
x.Timestamp = b
}
return json.Marshal(x)
}
func (e *Event) transactionMarshalJSON() ([]byte, error) {
// event aliases Event to allow calling json.Marshal without an infinite
// loop. It preserves all fields while none of the attached methods.
type event Event
// transactionEvent is like Event with shadowed fields for customizing JSON
// marshaling.
type transactionEvent struct {
*event
// The fields below shadow the respective fields in Event. They allow us
// to include timestamps when non-zero and omit them otherwise.
StartTime json.RawMessage `json:"start_timestamp,omitempty"`
Timestamp json.RawMessage `json:"timestamp,omitempty"`
func (e *Event) checkInMarshalJSON() ([]byte, error) {
checkIn := serializedCheckIn{
CheckInID: string(e.CheckIn.ID),
MonitorSlug: e.CheckIn.MonitorSlug,
Status: e.CheckIn.Status,
Duration: e.CheckIn.Duration.Seconds(),
Release: e.Release,
Environment: e.Environment,
MonitorConfig: nil,
}
x := transactionEvent{event: (*event)(e)}
if !e.Timestamp.IsZero() {
b, err := e.Timestamp.MarshalJSON()
if err != nil {
return nil, err
if e.MonitorConfig != nil {
checkIn.MonitorConfig = &MonitorConfig{
Schedule: e.MonitorConfig.Schedule,
CheckInMargin: e.MonitorConfig.CheckInMargin,
MaxRuntime: e.MonitorConfig.MaxRuntime,
Timezone: e.MonitorConfig.Timezone,
FailureIssueThreshold: e.MonitorConfig.FailureIssueThreshold,
RecoveryThreshold: e.MonitorConfig.RecoveryThreshold,
}
x.Timestamp = b
}
if !e.StartTime.IsZero() {
b, err := e.StartTime.MarshalJSON()
if err != nil {
return nil, err
}
x.StartTime = b
return json.Marshal(checkIn)
}
func (e *Event) toCategory() ratelimit.Category {
switch e.Type {
case errorType:
return ratelimit.CategoryError
case transactionType:
return ratelimit.CategoryTransaction
case logEvent.Type:
return ratelimit.CategoryLog
case checkInType:
return ratelimit.CategoryMonitor
case traceMetricEvent.Type:
return ratelimit.CategoryTraceMetric
default:
return ratelimit.CategoryUnknown
}
return json.Marshal(x)
}
// NewEvent creates a new Event.
func NewEvent() *Event {
event := Event{
return &Event{
Contexts: make(map[string]Context),
Extra: make(map[string]interface{}),
Tags: make(map[string]string),
Modules: make(map[string]string),
}
return &event
}
// Thread specifies threads that were running at the time of an event.
@ -390,3 +661,125 @@ type EventHint struct {
Request *http.Request
Response *http.Response
}
// Log is a single structured log record sent to Sentry, correlated to a
// trace (and optionally a span) via TraceID/SpanID.
type Log struct {
	Timestamp  time.Time                  `json:"timestamp,omitzero"`
	TraceID    TraceID                    `json:"trace_id"`
	SpanID     SpanID                     `json:"span_id,omitzero"`
	Level      LogLevel                   `json:"level"`
	Severity   int                        `json:"severity_number,omitempty"`
	Body       string                     `json:"body"`
	Attributes map[string]attribute.Value `json:"attributes,omitempty"`
}
// GetCategory returns the rate limit category for logs.
func (l *Log) GetCategory() ratelimit.Category {
	return ratelimit.CategoryLog
}

// GetEventID returns empty string (event ID set when batching).
func (l *Log) GetEventID() string {
	return ""
}

// GetSdkInfo returns nil (SDK info set when batching).
func (l *Log) GetSdkInfo() *protocol.SdkInfo {
	return nil
}

// GetDynamicSamplingContext returns nil (trace context set when batching).
func (l *Log) GetDynamicSamplingContext() map[string]string {
	return nil
}
// MetricType identifies the kind of Metric being recorded.
type MetricType string

const (
	MetricTypeInvalid      MetricType = ""
	MetricTypeCounter      MetricType = "counter"
	MetricTypeGauge        MetricType = "gauge"
	MetricTypeDistribution MetricType = "distribution"
)
// Metric is a single metric sample (counter, gauge, or distribution),
// correlated to a trace (and optionally a span) via TraceID/SpanID.
type Metric struct {
	Timestamp  time.Time                  `json:"timestamp,omitzero"`
	TraceID    TraceID                    `json:"trace_id"`
	SpanID     SpanID                     `json:"span_id,omitzero"`
	Type       MetricType                 `json:"type"`
	Name       string                     `json:"name"`
	Value      MetricValue                `json:"value"`
	Unit       string                     `json:"unit,omitempty"`
	Attributes map[string]attribute.Value `json:"attributes,omitempty"`
}
// GetCategory returns the rate limit category for metrics.
func (m *Metric) GetCategory() ratelimit.Category {
	return ratelimit.CategoryTraceMetric
}

// GetEventID returns empty string (event ID set when batching).
func (m *Metric) GetEventID() string {
	return ""
}

// GetSdkInfo returns nil (SDK info set when batching).
func (m *Metric) GetSdkInfo() *protocol.SdkInfo {
	return nil
}

// GetDynamicSamplingContext returns nil (trace context set when batching).
func (m *Metric) GetDynamicSamplingContext() map[string]string {
	return nil
}
// MetricValue stores metric values with full precision.
// It supports int64 (for counters) and float64 (for gauges and distributions);
// the two constructors below are the only ways to populate it.
type MetricValue struct {
	value attribute.Value
}
// Int64MetricValue creates a MetricValue from an int64.
// Used for counter metrics to preserve full int64 precision.
func Int64MetricValue(v int64) MetricValue {
	return MetricValue{value: attribute.Int64Value(v)}
}

// Float64MetricValue creates a MetricValue from a float64.
// Used for gauge and distribution metrics.
func Float64MetricValue(v float64) MetricValue {
	return MetricValue{value: attribute.Float64Value(v)}
}
// Type returns the type of the stored value (attribute.INT64 or attribute.FLOAT64).
// NOTE(review): a zero MetricValue reports the zero attribute.Type —
// presumably INVALID; confirm against the attribute package.
func (v MetricValue) Type() attribute.Type {
	return v.value.Type()
}
// Int64 returns the value as int64 if it holds an int64.
// The second return value indicates whether the type matched.
func (v MetricValue) Int64() (int64, bool) {
	if v.value.Type() != attribute.INT64 {
		return 0, false
	}
	return v.value.AsInt64(), true
}
// Float64 returns the value as float64 if it holds a float64.
// The second return value indicates whether the type matched.
func (v MetricValue) Float64() (float64, bool) {
	if v.value.Type() != attribute.FLOAT64 {
		return 0, false
	}
	return v.value.AsFloat64(), true
}
// AsInterface returns the value as int64 or float64.
// Use type assertion or type switch to handle the result.
func (v MetricValue) AsInterface() any {
	return v.value.AsInterface()
}

// MarshalJSON serializes the value as a bare number
// (no quoting, no type wrapper).
func (v MetricValue) MarshalJSON() ([]byte, error) {
	return json.Marshal(v.value.AsInterface())
}

View File

@ -0,0 +1,35 @@
package debuglog
import (
"io"
"log"
)
// logger is the global debug logger instance. It discards all output until
// SetOutput installs a real destination.
var logger = log.New(io.Discard, "[Sentry] ", log.LstdFlags)

// SetOutput changes the output destination of the logger.
func SetOutput(w io.Writer) {
	logger.SetOutput(w)
}

// GetLogger returns the shared logger instance. The returned *log.Logger is
// safe for concurrent use by multiple goroutines (per the log package docs).
func GetLogger() *log.Logger {
	return logger
}

// Printf calls Printf on the underlying logger.
func Printf(format string, args ...interface{}) {
	logger.Printf(format, args...)
}

// Println calls Println on the underlying logger.
func Println(args ...interface{}) {
	logger.Println(args...)
}

// Print calls Print on the underlying logger.
func Print(args ...interface{}) {
	logger.Print(args...)
}

View File

@ -0,0 +1,542 @@
package http
import (
"bytes"
"context"
"crypto/tls"
"crypto/x509"
"errors"
"fmt"
"io"
"net/http"
"net/url"
"sync"
"sync/atomic"
"time"
"github.com/getsentry/sentry-go/internal/debuglog"
"github.com/getsentry/sentry-go/internal/protocol"
"github.com/getsentry/sentry-go/internal/ratelimit"
"github.com/getsentry/sentry-go/internal/util"
)
const (
	apiVersion       = 7                // Sentry protocol version sent in the X-Sentry-Auth header
	defaultTimeout   = time.Second * 30 // default per-request HTTP timeout
	defaultQueueSize = 1000             // default AsyncTransport envelope queue capacity
)

// Sentinel errors returned by the transports.
var (
	ErrTransportQueueFull = errors.New("transport queue full")
	ErrTransportClosed    = errors.New("transport is closed")
	ErrEmptyEnvelope      = errors.New("empty envelope provided")
)
// TransportOptions configures how a transport connects to Sentry.
type TransportOptions struct {
	// Dsn is the Sentry DSN; an invalid value yields a disabled no-op transport.
	Dsn string
	// HTTPClient, when set, is used as-is instead of building a client.
	HTTPClient *http.Client
	// HTTPTransport, when set, is used as the client's RoundTripper.
	HTTPTransport http.RoundTripper
	// HTTPProxy and HTTPSProxy override environment proxy detection;
	// HTTPSProxy takes precedence when both are set.
	HTTPProxy  string
	HTTPSProxy string
	// CaCerts, when set, replaces the root CA pool used for TLS.
	CaCerts *x509.CertPool
}
// getProxyConfig picks the proxy function for the HTTP transport: an
// explicitly configured proxy URL (HTTPSProxy preferred over HTTPProxy),
// falling back to the standard environment-based detection. The URL is
// parsed lazily, per request, matching net/http's Proxy contract.
func getProxyConfig(options TransportOptions) func(*http.Request) (*url.URL, error) {
	proxy := options.HTTPSProxy
	if proxy == "" {
		proxy = options.HTTPProxy
	}
	if proxy == "" {
		return http.ProxyFromEnvironment
	}
	return func(*http.Request) (*url.URL, error) {
		return url.Parse(proxy)
	}
}
// getTLSConfig builds a TLS configuration with the caller-supplied root CA
// pool, or returns nil (use crypto/tls defaults) when no pool is given.
func getTLSConfig(options TransportOptions) *tls.Config {
	if options.CaCerts == nil {
		return nil
	}
	return &tls.Config{
		RootCAs:    options.CaCerts,
		MinVersion: tls.VersionTLS12,
	}
}
// getSentryRequestFromEnvelope serializes the envelope and builds the POST
// request for the DSN's API endpoint, setting the User-Agent, Content-Type
// and X-Sentry-Auth headers from the envelope's SDK info and the DSN keys.
func getSentryRequestFromEnvelope(ctx context.Context, dsn *protocol.Dsn, envelope *protocol.Envelope) (*http.Request, error) {
	var body bytes.Buffer
	if _, err := envelope.WriteTo(&body); err != nil {
		return nil, err
	}

	req, err := http.NewRequestWithContext(ctx, http.MethodPost, dsn.GetAPIURL().String(), &body)
	if err != nil {
		return nil, err
	}

	sdkName := envelope.Header.Sdk.Name
	sdkVersion := envelope.Header.Sdk.Version
	req.Header.Set("User-Agent", fmt.Sprintf("%s/%s", sdkName, sdkVersion))
	req.Header.Set("Content-Type", "application/x-sentry-envelope")

	auth := fmt.Sprintf("Sentry sentry_version=%d, "+
		"sentry_client=%s/%s, sentry_key=%s", apiVersion, sdkName, sdkVersion, dsn.GetPublicKey())
	// The secret key is optional in modern DSNs; append it only when present.
	if dsn.GetSecretKey() != "" {
		auth = fmt.Sprintf("%s, sentry_secret=%s", auth, dsn.GetSecretKey())
	}
	req.Header.Set("X-Sentry-Auth", auth)
	return req, nil
}
// categoryFromEnvelope derives the rate-limit category from the first
// category-bearing item in the envelope. Nil items and attachments are
// skipped; unknown item types, empty envelopes, and attachment-only
// envelopes map to CategoryAll.
func categoryFromEnvelope(envelope *protocol.Envelope) ratelimit.Category {
	if envelope == nil || len(envelope.Items) == 0 {
		return ratelimit.CategoryAll
	}
	for _, it := range envelope.Items {
		if it == nil || it.Header == nil {
			continue
		}
		switch it.Header.Type {
		case protocol.EnvelopeItemTypeAttachment:
			// Attachments never decide the category; keep scanning.
		case protocol.EnvelopeItemTypeEvent:
			return ratelimit.CategoryError
		case protocol.EnvelopeItemTypeTransaction:
			return ratelimit.CategoryTransaction
		case protocol.EnvelopeItemTypeCheckIn:
			return ratelimit.CategoryMonitor
		case protocol.EnvelopeItemTypeLog:
			return ratelimit.CategoryLog
		default:
			return ratelimit.CategoryAll
		}
	}
	return ratelimit.CategoryAll
}
// SyncTransport is a blocking implementation of Transport.
//
// Clients using this transport will send requests to Sentry sequentially and
// block until a response is returned.
//
// The blocking behavior is useful in a limited set of use cases. For example,
// use it when deploying code to a Function as a Service ("Serverless")
// platform, where any work happening in a background goroutine is not
// guaranteed to execute.
//
// For most cases, prefer AsyncTransport.
type SyncTransport struct {
	dsn       *protocol.Dsn
	client    *http.Client
	transport http.RoundTripper
	// mu guards limits.
	mu     sync.Mutex
	limits ratelimit.Map
	// Timeout is the per-request HTTP timeout; defaults to defaultTimeout.
	Timeout time.Duration
}
// NewSyncTransport creates a blocking transport from the given options.
// An invalid DSN yields a disabled no-op transport. Caller-supplied
// HTTPTransport/HTTPClient take precedence over the built-in defaults.
func NewSyncTransport(options TransportOptions) protocol.TelemetryTransport {
	dsn, err := protocol.NewDsn(options.Dsn)
	if err != nil || dsn == nil {
		debuglog.Printf("Transport is disabled: invalid dsn: %v\n", err)
		return NewNoopTransport()
	}

	t := &SyncTransport{
		Timeout: defaultTimeout,
		limits:  make(ratelimit.Map),
		dsn:     dsn,
	}

	t.transport = options.HTTPTransport
	if t.transport == nil {
		t.transport = &http.Transport{
			Proxy:           getProxyConfig(options),
			TLSClientConfig: getTLSConfig(options),
		}
	}

	t.client = options.HTTPClient
	if t.client == nil {
		t.client = &http.Client{
			Transport: t.transport,
			Timeout:   t.Timeout,
		}
	}
	return t
}
// SendEnvelope sends the envelope synchronously with a background context.
func (t *SyncTransport) SendEnvelope(envelope *protocol.Envelope) error {
	return t.SendEnvelopeWithContext(context.Background(), envelope)
}

// Close is a no-op: SyncTransport holds no background goroutines or queues.
func (t *SyncTransport) Close() {}

// IsRateLimited reports whether the given category is currently rate limited.
func (t *SyncTransport) IsRateLimited(category ratelimit.Category) bool {
	return t.disabled(category)
}

// HasCapacity always reports true: sends block instead of queueing.
func (t *SyncTransport) HasCapacity() bool { return true }
// SendEnvelopeWithContext encodes and delivers the envelope over HTTP,
// blocking until the server responds. Rate-limited categories are silently
// dropped (nil error). Rate-limit headers from the response are merged into
// t.limits for subsequent sends.
func (t *SyncTransport) SendEnvelopeWithContext(ctx context.Context, envelope *protocol.Envelope) error {
	if envelope == nil || len(envelope.Items) == 0 {
		return ErrEmptyEnvelope
	}
	category := categoryFromEnvelope(envelope)
	if t.disabled(category) {
		// Dropped due to an active rate limit; not an error for the caller.
		return nil
	}
	request, err := getSentryRequestFromEnvelope(ctx, t.dsn, envelope)
	if err != nil {
		debuglog.Printf("There was an issue creating the request: %v", err)
		return err
	}
	identifier := util.EnvelopeIdentifier(envelope)
	debuglog.Printf(
		"Sending %s to %s project: %s",
		identifier,
		t.dsn.GetHost(),
		t.dsn.GetProjectID(),
	)
	response, err := t.client.Do(request)
	if err != nil {
		debuglog.Printf("There was an issue with sending an event: %v", err)
		return err
	}
	util.HandleHTTPResponse(response, identifier)
	t.mu.Lock()
	// limits is initialized in NewSyncTransport; the nil check is defensive
	// for zero-value SyncTransport instances.
	if t.limits == nil {
		t.limits = make(ratelimit.Map)
	}
	t.limits.Merge(ratelimit.FromResponse(response))
	t.mu.Unlock()
	// Drain a bounded amount of the body so the connection can be reused,
	// then close it.
	_, _ = io.CopyN(io.Discard, response.Body, util.MaxDrainResponseBytes)
	return response.Body.Close()
}
// Flush is a no-op for SyncTransport: every send already blocks until the
// server responds, so there is never anything buffered.
func (t *SyncTransport) Flush(_ time.Duration) bool {
	return true
}

// FlushWithContext is a no-op for SyncTransport; see Flush.
func (t *SyncTransport) FlushWithContext(_ context.Context) bool {
	return true
}
// disabled reports whether the category is currently rate limited, logging
// the back-off deadline when it is. It takes the limits lock.
func (t *SyncTransport) disabled(c ratelimit.Category) bool {
	t.mu.Lock()
	defer t.mu.Unlock()
	if !t.limits.IsRateLimited(c) {
		return false
	}
	debuglog.Printf("Too many requests for %q, backing off till: %v", c, t.limits.Deadline(c))
	return true
}
// AsyncTransport is the default, non-blocking, implementation of Transport.
//
// Clients using this transport will enqueue requests in a queue and return to
// the caller before any network communication has happened. Requests are sent
// to Sentry sequentially from a background goroutine.
type AsyncTransport struct {
	dsn       *protocol.Dsn
	client    *http.Client
	transport http.RoundTripper
	// queue buffers outgoing envelopes; drained by the worker goroutine.
	queue chan *protocol.Envelope
	// mu guards limits.
	mu     sync.RWMutex
	limits ratelimit.Map
	// done is closed by Close to stop the worker and reject new sends.
	done chan struct{}
	// wg tracks the worker goroutine so Close can wait for it.
	wg sync.WaitGroup
	// flushRequest carries per-flush acknowledgment channels to the worker.
	flushRequest chan chan struct{}
	// Counters updated via sync/atomic.
	sentCount    int64
	droppedCount int64
	errorCount   int64
	// QueueSize is the queue capacity; defaults to defaultQueueSize.
	QueueSize int
	// Timeout is the per-request HTTP timeout; defaults to defaultTimeout.
	Timeout   time.Duration
	startOnce sync.Once
	closeOnce sync.Once
}
// NewAsyncTransport creates a non-blocking transport from the given options
// and starts its background worker. An invalid DSN yields a disabled no-op
// transport. Caller-supplied HTTPTransport/HTTPClient take precedence over
// the built-in defaults.
func NewAsyncTransport(options TransportOptions) protocol.TelemetryTransport {
	dsn, err := protocol.NewDsn(options.Dsn)
	if err != nil || dsn == nil {
		debuglog.Printf("Transport is disabled: invalid dsn: %v", err)
		return NewNoopTransport()
	}

	t := &AsyncTransport{
		QueueSize: defaultQueueSize,
		Timeout:   defaultTimeout,
		done:      make(chan struct{}),
		limits:    make(ratelimit.Map),
		dsn:       dsn,
	}
	t.queue = make(chan *protocol.Envelope, t.QueueSize)
	t.flushRequest = make(chan chan struct{})

	t.transport = options.HTTPTransport
	if t.transport == nil {
		t.transport = &http.Transport{
			Proxy:           getProxyConfig(options),
			TLSClientConfig: getTLSConfig(options),
		}
	}

	t.client = options.HTTPClient
	if t.client == nil {
		t.client = &http.Client{
			Transport: t.transport,
			Timeout:   t.Timeout,
		}
	}

	t.start()
	return t
}
// start launches the background worker goroutine exactly once.
func (t *AsyncTransport) start() {
	t.startOnce.Do(func() {
		t.wg.Add(1)
		go t.worker()
	})
}
// HasCapacity reports whether the async transport queue appears to have space
// for at least one more envelope. This is a best-effort, non-blocking check:
// the answer may already be stale when the caller acts on it. A closed
// transport always reports false.
func (t *AsyncTransport) HasCapacity() bool {
	t.mu.RLock()
	defer t.mu.RUnlock()
	select {
	case <-t.done:
		return false
	default:
		return len(t.queue) < cap(t.queue)
	}
}
// SendEnvelope enqueues the envelope for asynchronous delivery. It returns
// ErrTransportClosed after Close, ErrEmptyEnvelope for nil/empty envelopes,
// ErrTransportQueueFull when the queue is full (counted as dropped), and nil
// when the envelope was enqueued or silently dropped by a rate limit.
//
// NOTE(review): the done-check and the queue send are two separate selects;
// if Close also closes t.queue, a concurrent send in that window would panic
// with "send on closed channel" — confirm Close's channel-closing strategy.
func (t *AsyncTransport) SendEnvelope(envelope *protocol.Envelope) error {
	select {
	case <-t.done:
		return ErrTransportClosed
	default:
	}
	if envelope == nil || len(envelope.Items) == 0 {
		return ErrEmptyEnvelope
	}
	category := categoryFromEnvelope(envelope)
	if t.isRateLimited(category) {
		// Dropped due to an active rate limit; not an error for the caller.
		return nil
	}
	select {
	case t.queue <- envelope:
		identifier := util.EnvelopeIdentifier(envelope)
		debuglog.Printf(
			"Sending %s to %s project: %s",
			identifier,
			t.dsn.GetHost(),
			t.dsn.GetProjectID(),
		)
		return nil
	default:
		// Queue is full; drop rather than block the caller.
		atomic.AddInt64(&t.droppedCount, 1)
		return ErrTransportQueueFull
	}
}
// Flush blocks until the worker has drained the queue or the timeout
// elapses, reporting whether the flush completed in time.
func (t *AsyncTransport) Flush(timeout time.Duration) bool {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return t.FlushWithContext(ctx)
}

// FlushWithContext asks the worker to drain the queue and waits for its
// acknowledgment or for ctx cancellation, reporting whether the drain
// finished before ctx was done.
func (t *AsyncTransport) FlushWithContext(ctx context.Context) bool {
	flushResponse := make(chan struct{})
	select {
	case t.flushRequest <- flushResponse:
		// Worker accepted the request; it closes flushResponse when done.
		select {
		case <-flushResponse:
			debuglog.Println("Buffer flushed successfully.")
			return true
		case <-ctx.Done():
			debuglog.Println("Failed to flush, buffer timed out.")
			return false
		}
	case <-ctx.Done():
		debuglog.Println("Failed to flush, buffer timed out.")
		return false
	}
}
// Close shuts the transport down exactly once: it signals done, closes the
// queue and flush channels, and waits for the worker goroutine to exit.
//
// NOTE(review): SendEnvelope checks t.done before sending on t.queue, but no
// lock spans both steps — a SendEnvelope racing with Close could in principle
// send on the just-closed channel and panic. Confirm upstream whether callers
// must stop sending before Close.
func (t *AsyncTransport) Close() {
    t.closeOnce.Do(func() {
        close(t.done)
        close(t.queue)
        close(t.flushRequest)
        t.wg.Wait()
    })
}
// IsRateLimited reports whether the given category is currently rate
// limited; it is the exported wrapper around isRateLimited.
func (t *AsyncTransport) IsRateLimited(category ratelimit.Category) bool {
    return t.isRateLimited(category)
}
// worker is the single background goroutine that delivers queued envelopes
// and services flush requests until the transport is closed.
func (t *AsyncTransport) worker() {
    defer t.wg.Done()
    for {
        select {
        case <-t.done:
            return
        case envelope, open := <-t.queue:
            // A closed queue means Close was called; exit cleanly.
            if !open {
                return
            }
            t.processEnvelope(envelope)
        case flushResponse, open := <-t.flushRequest:
            if !open {
                return
            }
            // Deliver everything currently queued, then signal the
            // flusher by closing its response channel.
            t.drainQueue()
            close(flushResponse)
        }
    }
}
// drainQueue synchronously delivers every envelope currently sitting in the
// queue, returning as soon as the queue is empty or has been closed.
func (t *AsyncTransport) drainQueue() {
    for {
        select {
        case env, ok := <-t.queue:
            if !ok {
                // Queue closed; nothing left to deliver.
                return
            }
            t.processEnvelope(env)
        default:
            // Queue is empty right now; drain is complete.
            return
        }
    }
}
// processEnvelope delivers one envelope over HTTP and bumps the sent or
// error counter depending on the outcome.
func (t *AsyncTransport) processEnvelope(envelope *protocol.Envelope) {
    if ok := t.sendEnvelopeHTTP(envelope); ok {
        atomic.AddInt64(&t.sentCount, 1)
        return
    }
    atomic.AddInt64(&t.errorCount, 1)
}
// sendEnvelopeHTTP performs the actual HTTP delivery of one envelope and
// merges any rate limits announced by the server into t.limits. It reports
// whether the envelope was accepted.
func (t *AsyncTransport) sendEnvelopeHTTP(envelope *protocol.Envelope) bool {
    category := categoryFromEnvelope(envelope)
    // Re-check the limit here: it may have changed since enqueue time.
    if t.isRateLimited(category) {
        return false
    }
    ctx, cancel := context.WithTimeout(context.Background(), defaultTimeout)
    defer cancel()
    request, err := getSentryRequestFromEnvelope(ctx, t.dsn, envelope)
    if err != nil {
        debuglog.Printf("Failed to create request from envelope: %v", err)
        return false
    }
    response, err := t.client.Do(request)
    if err != nil {
        debuglog.Printf("HTTP request failed: %v", err)
        return false
    }
    defer response.Body.Close()
    identifier := util.EnvelopeIdentifier(envelope)
    success := util.HandleHTTPResponse(response, identifier)
    t.mu.Lock()
    if t.limits == nil {
        t.limits = make(ratelimit.Map)
    }
    // Record server-announced rate limits for subsequent sends.
    t.limits.Merge(ratelimit.FromResponse(response))
    t.mu.Unlock()
    // Drain (bounded) so the underlying connection can be reused.
    _, _ = io.CopyN(io.Discard, response.Body, util.MaxDrainResponseBytes)
    return success
}
// isRateLimited reports whether category is currently rate limited, logging
// the deadline when it is. Takes only the read lock, so it is safe for
// concurrent use.
func (t *AsyncTransport) isRateLimited(category ratelimit.Category) bool {
    t.mu.RLock()
    defer t.mu.RUnlock()
    limited := t.limits.IsRateLimited(category)
    if limited {
        debuglog.Printf("Rate limited for category %q until %v", category, t.limits.Deadline(category))
    }
    return limited
}
// NoopTransport is a transport implementation that drops all events.
// Used internally when an empty or invalid DSN is provided.
type NoopTransport struct{}

// NewNoopTransport returns a transport that silently discards every envelope.
func NewNoopTransport() *NoopTransport {
    debuglog.Println("Transport initialized with invalid DSN. Using NoopTransport. No events will be delivered.")
    return &NoopTransport{}
}

// SendEnvelope drops the envelope and reports success.
func (t *NoopTransport) SendEnvelope(_ *protocol.Envelope) error {
    debuglog.Println("Envelope dropped due to NoopTransport usage.")
    return nil
}

// IsRateLimited always reports false: nothing is ever sent.
func (t *NoopTransport) IsRateLimited(_ ratelimit.Category) bool {
    return false
}

// Flush trivially succeeds because there is never anything queued.
func (t *NoopTransport) Flush(_ time.Duration) bool {
    return true
}

// FlushWithContext trivially succeeds because there is never anything queued.
func (t *NoopTransport) FlushWithContext(_ context.Context) bool {
    return true
}

// Close is a no-op; the noop transport holds no resources.
func (t *NoopTransport) Close() {
    // Nothing to close
}

// HasCapacity always reports true: dropped envelopes need no space.
func (t *NoopTransport) HasCapacity() bool {
    return true
}

View File

@ -0,0 +1,12 @@
## Why do we have this "otel/baggage" folder?
The root sentry-go SDK (namely, the Dynamic Sampling functionality) needs an implementation of the [baggage spec](https://www.w3.org/TR/baggage/).
For that reason, we've taken the existing baggage implementation from the [opentelemetry-go](https://github.com/open-telemetry/opentelemetry-go/) repository, and fixed a few things that in our opinion were violating the specification.
These issues are:
1. Baggage string value `one%20two` should be properly parsed as "one two"
1. Baggage string value `one+two` should be parsed as "one+two"
1. Go string value "one two" should be encoded as `one%20two` (percent encoding), and NOT as `one+two` (URL query encoding).
1. Go string value "1=1" might be encoded as `1=1`, because the spec says: "Note, value MAY contain any number of the equal sign (=) characters. Parsers MUST NOT assume that the equal sign is only used to separate key and value.". `1%3D1` is also valid, but to simplify the implementation we're not doing it.
Changes were made in this PR: https://github.com/getsentry/sentry-go/pull/568

View File

@ -1,7 +1,6 @@
// This file was vendored in unmodified from
// https://github.com/open-telemetry/opentelemetry-go/blob/c21b6b6bb31a2f74edd06e262f1690f3f6ea3d5c/baggage/baggage.go
// Adapted from https://github.com/open-telemetry/opentelemetry-go/blob/c21b6b6bb31a2f74edd06e262f1690f3f6ea3d5c/baggage/baggage.go
//
// # Copyright The OpenTelemetry Authors
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -23,6 +22,7 @@ import (
"net/url"
"regexp"
"strings"
"unicode/utf8"
"github.com/getsentry/sentry-go/internal/otel/baggage/internal/baggage"
)
@ -267,11 +267,12 @@ func NewMember(key, value string, props ...Property) (Member, error) {
if err := m.validate(); err != nil {
return newInvalidMember(), err
}
decodedValue, err := url.QueryUnescape(value)
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
m.value = decodedValue
//// NOTE(anton): I don't think we need to unescape here
// decodedValue, err := url.PathUnescape(value)
// if err != nil {
// return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
// }
// m.value = decodedValue
return m, nil
}
@ -315,17 +316,19 @@ func parseMember(member string) (Member, error) {
// "Leading and trailing whitespaces are allowed but MUST be trimmed
// when converting the header into a data structure."
key = strings.TrimSpace(kv[0])
value = strings.TrimSpace(kv[1])
var err error
value, err = url.QueryUnescape(strings.TrimSpace(kv[1]))
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
}
if !keyRe.MatchString(key) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key)
}
if !valueRe.MatchString(value) {
return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value)
}
decodedValue, err := url.PathUnescape(value)
if err != nil {
return newInvalidMember(), fmt.Errorf("%w: %q", err, value)
}
value = decodedValue
default:
// This should never happen unless a developer has changed the string
// splitting somehow. Panic instead of failing silently and allowing
@ -347,9 +350,10 @@ func (m Member) validate() error {
if !keyRe.MatchString(m.key) {
return fmt.Errorf("%w: %q", errInvalidKey, m.key)
}
if !valueRe.MatchString(m.value) {
return fmt.Errorf("%w: %q", errInvalidValue, m.value)
}
//// NOTE(anton): IMO it's too early to validate the value here.
// if !valueRe.MatchString(m.value) {
// return fmt.Errorf("%w: %q", errInvalidValue, m.value)
// }
return m.properties.validate()
}
@ -366,13 +370,40 @@ func (m Member) Properties() []Property { return m.properties.Copy() }
// specification.
func (m Member) String() string {
// A key is just an ASCII string, but a value is URL encoded UTF-8.
s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value))
s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, percentEncodeValue(m.value))
if len(m.properties) > 0 {
s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String())
}
return s
}
// percentEncodeValue encodes the baggage value, using percent-encoding for
// disallowed octets.
//
// The string is walked rune by rune; a rune that the value regexp allows is
// emitted verbatim, while every other rune has each of its UTF-8 bytes
// percent-encoded. '%' itself is always encoded, even if valueRe would
// accept it, so the output can be unambiguously decoded.
func percentEncodeValue(s string) string {
    const upperhex = "0123456789ABCDEF"
    var sb strings.Builder
    for byteIndex, width := 0, 0; byteIndex < len(s); byteIndex += width {
        runeValue, w := utf8.DecodeRuneInString(s[byteIndex:])
        width = w
        char := string(runeValue)
        if valueRe.MatchString(char) && char != "%" {
            // The character is returned as is, no need to percent-encode
            sb.WriteString(char)
        } else {
            // We need to percent-encode each byte of the multi-octet character
            for j := 0; j < width; j++ {
                b := s[byteIndex+j]
                sb.WriteByte('%')
                // Bitwise operations are inspired by "net/url"
                sb.WriteByte(upperhex[b>>4])
                sb.WriteByte(upperhex[b&15])
            }
        }
    }
    return sb.String()
}
// Baggage is a list of baggage members representing the baggage-string as
// defined by the W3C Baggage specification.
type Baggage struct { //nolint:golint

View File

@ -1,5 +1,4 @@
// This file was vendored in unmodified from
// https://github.com/open-telemetry/opentelemetry-go/blob/c21b6b6bb31a2f74edd06e262f1690f3f6ea3d5c/internal/baggage/baggage.go
// Adapted from https://github.com/open-telemetry/opentelemetry-go/blob/c21b6b6bb31a2f74edd06e262f1690f3f6ea3d5c/internal/baggage/baggage.go
//
// Copyright The OpenTelemetry Authors
//

View File

@ -0,0 +1,236 @@
package protocol
import (
"encoding/json"
"fmt"
"net/url"
"strconv"
"strings"
"time"
)
// apiVersion is the version of the Sentry API.
const apiVersion = "7"
// scheme is the transport protocol of a DSN: "http" or "https".
type scheme string

const (
    SchemeHTTP  scheme = "http"
    SchemeHTTPS scheme = "https"
)

// defaultPort returns the canonical port for the scheme: 443 for HTTPS,
// 80 for HTTP and for anything unrecognized.
func (scheme scheme) defaultPort() int {
    if scheme == SchemeHTTPS {
        return 443
    }
    // HTTP and unknown schemes both fall back to 80, matching the
    // original switch's default branch.
    return 80
}
// DsnParseError represents an error that occurs if a Sentry
// DSN cannot be parsed.
type DsnParseError struct {
    Message string
}

// Error implements the error interface, prefixing the message with a
// stable "[Sentry] DsnParseError: " marker.
func (e DsnParseError) Error() string {
    const prefix = "[Sentry] DsnParseError: "
    return prefix + e.Message
}
// Dsn is used as the remote address source to client transport.
type Dsn struct {
    scheme    scheme // "http" or "https"
    publicKey string // required; taken from the URL userinfo
    secretKey string // optional; legacy DSNs only
    host      string
    port      int    // explicit port, or the scheme's default
    path      string // optional path prefix before the project ID
    projectID string // last URL path segment; never empty
}
// NewDsn creates a Dsn by parsing rawURL. Most users will never call this
// function directly. It is provided for use in custom Transport
// implementations.
//
// A DSN has the form scheme://publicKey[:secretKey]@host[:port]/[path/]projectID.
// A *DsnParseError is returned when any required component is missing or
// malformed.
func NewDsn(rawURL string) (*Dsn, error) {
    // Parse
    parsedURL, err := url.Parse(rawURL)
    if err != nil {
        return nil, &DsnParseError{fmt.Sprintf("invalid url: %v", err)}
    }
    // Scheme
    var scheme scheme
    switch parsedURL.Scheme {
    case "http":
        scheme = SchemeHTTP
    case "https":
        scheme = SchemeHTTPS
    default:
        return nil, &DsnParseError{"invalid scheme"}
    }
    // PublicKey
    publicKey := parsedURL.User.Username()
    if publicKey == "" {
        return nil, &DsnParseError{"empty username"}
    }
    // SecretKey (optional; only present in legacy DSNs)
    var secretKey string
    if parsedSecretKey, ok := parsedURL.User.Password(); ok {
        secretKey = parsedSecretKey
    }
    // Host
    host := parsedURL.Hostname()
    if host == "" {
        return nil, &DsnParseError{"empty host"}
    }
    // Port: explicit, or the scheme's default
    var port int
    if p := parsedURL.Port(); p != "" {
        port, err = strconv.Atoi(p)
        if err != nil {
            return nil, &DsnParseError{"invalid port"}
        }
    } else {
        port = scheme.defaultPort()
    }
    // ProjectID: the last path segment; everything before it is an
    // optional path prefix.
    if parsedURL.Path == "" || parsedURL.Path == "/" {
        return nil, &DsnParseError{"empty project id"}
    }
    pathSegments := strings.Split(parsedURL.Path[1:], "/")
    projectID := pathSegments[len(pathSegments)-1]
    if projectID == "" {
        return nil, &DsnParseError{"empty project id"}
    }
    // Path
    var path string
    if len(pathSegments) > 1 {
        path = "/" + strings.Join(pathSegments[0:len(pathSegments)-1], "/")
    }
    return &Dsn{
        scheme:    scheme,
        publicKey: publicKey,
        secretKey: secretKey,
        host:      host,
        port:      port,
        path:      path,
        projectID: projectID,
    }, nil
}
// String formats Dsn struct into a valid string url.
func (dsn Dsn) String() string {
    var b strings.Builder
    b.WriteString(string(dsn.scheme))
    b.WriteString("://")
    b.WriteString(dsn.publicKey)
    if dsn.secretKey != "" {
        b.WriteString(":")
        b.WriteString(dsn.secretKey)
    }
    b.WriteString("@")
    b.WriteString(dsn.host)
    // Only emit the port when it differs from the scheme's default.
    if dsn.port != dsn.scheme.defaultPort() {
        b.WriteString(":")
        b.WriteString(strconv.Itoa(dsn.port))
    }
    if dsn.path != "" {
        b.WriteString(dsn.path)
    }
    b.WriteString("/")
    b.WriteString(dsn.projectID)
    return b.String()
}
// GetScheme returns the scheme of the DSN ("http" or "https").
// The Get prefix is kept for backwards compatibility with existing callers.
func (dsn Dsn) GetScheme() string {
    return string(dsn.scheme)
}

// GetPublicKey returns the public key of the DSN.
func (dsn Dsn) GetPublicKey() string {
    return dsn.publicKey
}

// GetSecretKey returns the secret key of the DSN (empty for modern DSNs).
func (dsn Dsn) GetSecretKey() string {
    return dsn.secretKey
}

// GetHost returns the host of the DSN.
func (dsn Dsn) GetHost() string {
    return dsn.host
}

// GetPort returns the port of the DSN (explicit or scheme default).
func (dsn Dsn) GetPort() int {
    return dsn.port
}

// GetPath returns the optional path prefix of the DSN.
func (dsn Dsn) GetPath() string {
    return dsn.path
}

// GetProjectID returns the project ID of the DSN.
func (dsn Dsn) GetProjectID() string {
    return dsn.projectID
}
// GetAPIURL returns the URL of the envelope endpoint of the project
// associated with the DSN, i.e.
// scheme://host[:port][/path]/api/<projectID>/envelope/.
func (dsn Dsn) GetAPIURL() *url.URL {
    var rawURL string
    rawURL += fmt.Sprintf("%s://%s", dsn.scheme, dsn.host)
    if dsn.port != dsn.scheme.defaultPort() {
        rawURL += fmt.Sprintf(":%d", dsn.port)
    }
    if dsn.path != "" {
        rawURL += dsn.path
    }
    rawURL += fmt.Sprintf("/api/%s/%s/", dsn.projectID, "envelope")
    // The parse error is ignored: components were validated by NewDsn, so
    // rawURL is well-formed. NOTE(review): a zero-value Dsn would yield a
    // nil URL here — confirm callers always construct via NewDsn.
    parsedURL, _ := url.Parse(rawURL)
    return parsedURL
}
// RequestHeaders returns all the necessary headers that have to be used in the transport when sending events
// to the /store endpoint.
//
// Deprecated: This method shall only be used if you want to implement your own transport that sends events to
// the /store endpoint. If you're using the transport provided by the SDK, all necessary headers to authenticate
// against the /envelope endpoint are added automatically.
func (dsn Dsn) RequestHeaders(sdkVersion string) map[string]string {
    // sentry_timestamp is seconds since the Unix epoch.
    auth := fmt.Sprintf("Sentry sentry_version=%s, sentry_timestamp=%d, "+
        "sentry_client=sentry.go/%s, sentry_key=%s", apiVersion, time.Now().Unix(), sdkVersion, dsn.publicKey)
    // The secret key is only present in legacy DSNs.
    if dsn.secretKey != "" {
        auth = fmt.Sprintf("%s, sentry_secret=%s", auth, dsn.secretKey)
    }
    return map[string]string{
        "Content-Type": "application/json",
        "X-Sentry-Auth": auth,
    }
}
// MarshalJSON converts the Dsn struct to JSON, encoding it as its
// canonical string form.
func (dsn Dsn) MarshalJSON() ([]byte, error) {
    return json.Marshal(dsn.String())
}

// UnmarshalJSON converts JSON data to the Dsn struct.
//
// The payload must be a JSON string containing a valid DSN URL; both
// malformed JSON and an invalid DSN produce an error.
func (dsn *Dsn) UnmarshalJSON(data []byte) error {
    var str string
    // The unmarshal error used to be discarded (`_ =`), which silently
    // turned malformed JSON into a misleading "invalid scheme" parse
    // error on the empty string; surface the real error instead.
    if err := json.Unmarshal(data, &str); err != nil {
        return err
    }
    newDsn, err := NewDsn(str)
    if err != nil {
        return err
    }
    *dsn = *newDsn
    return nil
}

View File

@ -0,0 +1,225 @@
package protocol
import (
"bytes"
"encoding/json"
"fmt"
"io"
"time"
)
// Envelope represents a Sentry envelope containing headers and items.
// Both fields use `json:"-"` because envelopes are serialized via
// Serialize/WriteTo into the newline-delimited envelope format, not as a
// single JSON document.
type Envelope struct {
    Header *EnvelopeHeader `json:"-"`
    Items  []*EnvelopeItem `json:"-"`
}

// EnvelopeHeader represents the header of a Sentry envelope.
type EnvelopeHeader struct {
    // EventID is the unique identifier for this event
    EventID string `json:"event_id"`
    // SentAt is the timestamp when the event was sent from the SDK as string in RFC 3339 format.
    // Used for clock drift correction of the event timestamp. The time zone must be UTC.
    SentAt time.Time `json:"sent_at,omitzero"`
    // Dsn can be used for self-authenticated envelopes.
    // This means that the envelope has all the information necessary to be sent to sentry.
    // In this case the full DSN must be stored in this key.
    Dsn string `json:"dsn,omitempty"`
    // Sdk carries the same payload as the sdk interface in the event payload but can be carried for all events.
    // This means that SDK information can be carried for minidumps, session data and other submissions.
    Sdk *SdkInfo `json:"sdk,omitempty"`
    // Trace contains the [Dynamic Sampling Context](https://develop.sentry.dev/sdk/telemetry/traces/dynamic-sampling-context/)
    Trace map[string]string `json:"trace,omitempty"`
}

// EnvelopeItemType represents the type of envelope item.
type EnvelopeItemType string

// Constants for envelope item types as defined in the Sentry documentation.
const (
    EnvelopeItemTypeEvent       EnvelopeItemType = "event"
    EnvelopeItemTypeTransaction EnvelopeItemType = "transaction"
    EnvelopeItemTypeCheckIn     EnvelopeItemType = "check_in"
    EnvelopeItemTypeAttachment  EnvelopeItemType = "attachment"
    EnvelopeItemTypeLog         EnvelopeItemType = "log"
    EnvelopeItemTypeTraceMetric EnvelopeItemType = "trace_metric"
)

// EnvelopeItemHeader represents the header of an envelope item.
type EnvelopeItemHeader struct {
    // Type specifies the type of this Item and its contents.
    // Based on the Item type, more headers may be required.
    Type EnvelopeItemType `json:"type"`
    // Length is the length of the payload in bytes.
    // If no length is specified, the payload implicitly goes to the next newline.
    // For payloads containing newline characters, the length must be specified.
    Length *int `json:"length,omitempty"`
    // Filename is the name of the attachment file (used for attachments)
    Filename string `json:"filename,omitempty"`
    // ContentType is the MIME type of the item payload (used for attachments and some other item types)
    ContentType string `json:"content_type,omitempty"`
    // ItemCount is the number of items in a batch (used for logs)
    ItemCount *int `json:"item_count,omitempty"`
}

// EnvelopeItem represents a single item or batch within an envelope.
// Header describes the payload; Payload is the raw item body.
type EnvelopeItem struct {
    Header  *EnvelopeItemHeader `json:"-"`
    Payload []byte              `json:"-"`
}
// NewEnvelope creates a new envelope with the given header and an empty
// (non-nil) item list.
func NewEnvelope(header *EnvelopeHeader) *Envelope {
    env := &Envelope{Header: header}
    env.Items = []*EnvelopeItem{}
    return env
}

// AddItem adds an item to the envelope; nil items are ignored.
func (e *Envelope) AddItem(item *EnvelopeItem) {
    if item != nil {
        e.Items = append(e.Items, item)
    }
}
// Serialize serializes the envelope to the Sentry envelope format.
//
// Format: Headers "\n" { Item } [ "\n" ]
// Item: Headers "\n" Payload "\n".
func (e *Envelope) Serialize() ([]byte, error) {
    headerJSON, err := json.Marshal(e.Header)
    if err != nil {
        return nil, fmt.Errorf("failed to marshal envelope header: %w", err)
    }
    var out bytes.Buffer
    if _, err := out.Write(headerJSON); err != nil {
        return nil, fmt.Errorf("failed to write envelope header: %w", err)
    }
    if _, err := out.WriteString("\n"); err != nil {
        return nil, fmt.Errorf("failed to write newline after envelope header: %w", err)
    }
    // Each item is appended as header line + payload line.
    for _, item := range e.Items {
        if err := e.writeItem(&out, item); err != nil {
            return nil, fmt.Errorf("failed to write envelope item: %w", err)
        }
    }
    return out.Bytes(), nil
}
// WriteTo writes the envelope to the given writer in the Sentry envelope
// format, returning the number of bytes written.
func (e *Envelope) WriteTo(w io.Writer) (int64, error) {
    data, err := e.Serialize()
    if err != nil {
        return 0, err
    }
    written, err := w.Write(data)
    return int64(written), err
}
// writeItem writes a single envelope item to the buffer as a JSON header
// line followed by the payload line.
func (e *Envelope) writeItem(buf *bytes.Buffer, item *EnvelopeItem) error {
    encodedHeader, err := json.Marshal(item.Header)
    if err != nil {
        return fmt.Errorf("failed to marshal item header: %w", err)
    }
    if _, err := buf.Write(encodedHeader); err != nil {
        return fmt.Errorf("failed to write item header: %w", err)
    }
    if _, err := buf.WriteString("\n"); err != nil {
        return fmt.Errorf("failed to write newline after item header: %w", err)
    }
    // An empty payload still gets its terminating newline below.
    if len(item.Payload) > 0 {
        if _, err := buf.Write(item.Payload); err != nil {
            return fmt.Errorf("failed to write item payload: %w", err)
        }
    }
    if _, err := buf.WriteString("\n"); err != nil {
        return fmt.Errorf("failed to write newline after item payload: %w", err)
    }
    return nil
}
// Size returns the total size in bytes of the envelope when serialized.
func (e *Envelope) Size() (int, error) {
    serialized, err := e.Serialize()
    if err != nil {
        return 0, err
    }
    return len(serialized), nil
}
// NewEnvelopeItem creates a new envelope item with the specified type and
// payload; the header's Length is set to the payload size.
func NewEnvelopeItem(itemType EnvelopeItemType, payload []byte) *EnvelopeItem {
    size := len(payload)
    return &EnvelopeItem{
        Header: &EnvelopeItemHeader{
            Type:   itemType,
            Length: &size,
        },
        Payload: payload,
    }
}

// NewAttachmentItem creates a new envelope item for an attachment with the
// given filename, MIME content type, and payload.
func NewAttachmentItem(filename, contentType string, payload []byte) *EnvelopeItem {
    size := len(payload)
    return &EnvelopeItem{
        Header: &EnvelopeItemHeader{
            Type:        EnvelopeItemTypeAttachment,
            Length:      &size,
            Filename:    filename,
            ContentType: contentType,
        },
        Payload: payload,
    }
}

// NewLogItem creates a new envelope item for a batch of itemCount logs.
func NewLogItem(itemCount int, payload []byte) *EnvelopeItem {
    size := len(payload)
    return &EnvelopeItem{
        Header: &EnvelopeItemHeader{
            Type:        EnvelopeItemTypeLog,
            Length:      &size,
            ItemCount:   &itemCount,
            ContentType: "application/vnd.sentry.items.log+json",
        },
        Payload: payload,
    }
}

// NewTraceMetricItem creates a new envelope item for a batch of itemCount
// trace metrics.
func NewTraceMetricItem(itemCount int, payload []byte) *EnvelopeItem {
    size := len(payload)
    return &EnvelopeItem{
        Header: &EnvelopeItemHeader{
            Type:        EnvelopeItemTypeTraceMetric,
            Length:      &size,
            ItemCount:   &itemCount,
            ContentType: "application/vnd.sentry.items.trace-metric+json",
        },
        Payload: payload,
    }
}

View File

@ -0,0 +1,56 @@
package protocol
import (
"context"
"time"
"github.com/getsentry/sentry-go/internal/ratelimit"
)
// TelemetryItem represents any telemetry data that can be stored in buffers and sent to Sentry.
// This is the base interface that all telemetry items must implement.
type TelemetryItem interface {
    // GetCategory returns the rate limit category for this item.
    GetCategory() ratelimit.Category
    // GetEventID returns the event ID for this item (may be empty for
    // batched item kinds such as logs and metrics).
    GetEventID() string
    // GetSdkInfo returns SDK information for the envelope header, or nil
    // when the envelope-level SDK info should be used.
    GetSdkInfo() *SdkInfo
    // GetDynamicSamplingContext returns trace context for the envelope header.
    GetDynamicSamplingContext() map[string]string
}

// EnvelopeItemConvertible represents items that can be converted directly to envelope items.
type EnvelopeItemConvertible interface {
    TelemetryItem
    // ToEnvelopeItem converts the item to a Sentry envelope item.
    ToEnvelopeItem() (*EnvelopeItem, error)
}

// TelemetryTransport represents the envelope-first transport interface.
// This interface is designed for the telemetry buffer system and provides
// non-blocking sends with backpressure signals.
type TelemetryTransport interface {
    // SendEnvelope sends an envelope to Sentry. Returns immediately with
    // backpressure error if the queue is full.
    SendEnvelope(envelope *Envelope) error
    // HasCapacity reports whether the transport has capacity to accept at least one more envelope.
    HasCapacity() bool
    // IsRateLimited checks if a specific category is currently rate limited
    IsRateLimited(category ratelimit.Category) bool
    // Flush waits for all pending envelopes to be sent, with timeout
    Flush(timeout time.Duration) bool
    // FlushWithContext waits for all pending envelopes to be sent
    FlushWithContext(ctx context.Context) bool
    // Close shuts down the transport gracefully
    Close()
}

View File

@ -0,0 +1,48 @@
package protocol
import (
"encoding/json"
"github.com/getsentry/sentry-go/internal/ratelimit"
)
// LogAttribute is the JSON representation for a single log attribute value.
// Value holds the raw attribute value; Type accompanies it so the receiver
// knows how to interpret Value (NOTE(review): the exact type-tag vocabulary
// is defined by the Sentry log protocol, not visible here).
type LogAttribute struct {
    Value any    `json:"value"`
    Type  string `json:"type"`
}
// Logs is a container for multiple log items which knows how to convert
// itself into a single batched log envelope item.
type Logs []TelemetryItem

// ToEnvelopeItem marshals each log entry and batches the survivors into one
// envelope item. Entries that fail to marshal are skipped; (nil, nil) is
// returned when no entry could be marshaled.
func (ls Logs) ToEnvelopeItem() (*EnvelopeItem, error) {
    // Convert each log to its JSON representation
    items := make([]json.RawMessage, 0, len(ls))
    for _, log := range ls {
        logPayload, err := json.Marshal(log)
        if err != nil {
            // Best-effort batching: drop entries that cannot be encoded.
            continue
        }
        items = append(items, logPayload)
    }
    if len(items) == 0 {
        return nil, nil
    }
    wrapper := struct {
        Items []json.RawMessage `json:"items"`
    }{Items: items}
    payload, err := json.Marshal(wrapper)
    if err != nil {
        return nil, err
    }
    // Report the number of items actually included in the batch, not the
    // number offered (marshal failures above may have dropped some).
    // This also matches Metrics.ToEnvelopeItem, which uses len(items).
    return NewLogItem(len(items), payload), nil
}

// GetCategory returns the rate-limit category shared by all log items.
func (Logs) GetCategory() ratelimit.Category { return ratelimit.CategoryLog }

// GetEventID returns an empty ID; a batched log item has no single event ID.
func (Logs) GetEventID() string { return "" }

// GetSdkInfo returns nil; SDK info is supplied at the envelope level.
func (Logs) GetSdkInfo() *SdkInfo { return nil }

// GetDynamicSamplingContext returns nil; log batches carry no trace context.
func (Logs) GetDynamicSamplingContext() map[string]string { return nil }

View File

@ -0,0 +1,41 @@
package protocol
import (
"encoding/json"
"github.com/getsentry/sentry-go/internal/ratelimit"
)
// Metrics is a batch of trace-metric telemetry items that can be folded
// into a single envelope item.
type Metrics []TelemetryItem

// ToEnvelopeItem marshals every metric and wraps the survivors in one
// trace-metric envelope item. Metrics that fail to marshal are skipped;
// (nil, nil) is returned when none survive.
func (ms Metrics) ToEnvelopeItem() (*EnvelopeItem, error) {
    encoded := make([]json.RawMessage, 0, len(ms))
    for _, m := range ms {
        raw, err := json.Marshal(m)
        if err != nil {
            // Skip metrics that cannot be encoded.
            continue
        }
        encoded = append(encoded, raw)
    }
    if len(encoded) == 0 {
        return nil, nil
    }
    wrapper := struct {
        Items []json.RawMessage `json:"items"`
    }{Items: encoded}
    payload, err := json.Marshal(wrapper)
    if err != nil {
        return nil, err
    }
    return NewTraceMetricItem(len(encoded), payload), nil
}

// GetCategory returns the trace-metric rate-limit category.
func (Metrics) GetCategory() ratelimit.Category { return ratelimit.CategoryTraceMetric }

// GetEventID returns an empty ID; a metric batch has no single event ID.
func (Metrics) GetEventID() string { return "" }

// GetSdkInfo returns nil; SDK info is supplied at the envelope level.
func (Metrics) GetSdkInfo() *SdkInfo { return nil }

// GetDynamicSamplingContext returns nil; metric batches carry no trace context.
func (Metrics) GetDynamicSamplingContext() map[string]string { return nil }

View File

@ -0,0 +1,15 @@
package protocol
// SdkInfo contains SDK metadata carried in envelope and event headers.
type SdkInfo struct {
    Name         string       `json:"name,omitempty"`
    Version      string       `json:"version,omitempty"`
    Integrations []string     `json:"integrations,omitempty"`
    Packages     []SdkPackage `json:"packages,omitempty"`
}

// SdkPackage describes a package that was installed.
type SdkPackage struct {
    Name    string `json:"name,omitempty"`
    Version string `json:"version,omitempty"`
}

View File

@ -0,0 +1,18 @@
package protocol
import (
"crypto/rand"
"encoding/hex"
)
// GenerateEventID generates a random UUID v4 for use as a Sentry event ID.
// The result is a 32-character lowercase hex string without dashes.
func GenerateEventID() string {
    var id [16]byte
    // Prefer rand.Read over rand.Reader, see https://go-review.googlesource.com/c/go/+/272326/.
    _, _ = rand.Read(id[:])
    // Stamp the RFC 4122 version (4) and variant (10xx) bits.
    id[6] = (id[6] & 0x0F) | 0x40
    id[8] = (id[8] & 0x3F) | 0x80
    return hex.EncodeToString(id[:])
}

View File

@ -8,18 +8,21 @@ import (
)
// Reference:
// https://github.com/getsentry/relay/blob/0424a2e017d193a93918053c90cdae9472d164bf/relay-common/src/constants.rs#L116-L127
// https://github.com/getsentry/relay/blob/46dfaa850b8717a6e22c3e9a275ba17fe673b9da/relay-base-schema/src/data_category.rs#L231-L271
// Category classifies supported payload types that can be ingested by Sentry
// and, therefore, rate limited.
type Category string
// Known rate limit categories. As a special case, the CategoryAll applies to
// all known payload types.
// Known rate limit categories that are specified in rate limit headers.
const (
CategoryAll Category = ""
CategoryUnknown Category = "unknown" // Unknown category should not get rate limited
CategoryAll Category = "" // Special category for empty categories (applies to all)
CategoryError Category = "error"
CategoryTransaction Category = "transaction"
CategoryLog Category = "log_item"
CategoryMonitor Category = "monitor"
CategoryTraceMetric Category = "trace_metric"
)
// knownCategories is the set of currently known categories. Other categories
@ -28,14 +31,28 @@ var knownCategories = map[Category]struct{}{
CategoryAll: {},
CategoryError: {},
CategoryTransaction: {},
CategoryLog: {},
CategoryMonitor: {},
CategoryTraceMetric: {},
}
// String returns the category formatted for debugging.
func (c Category) String() string {
switch c {
case "":
case CategoryAll:
return "CategoryAll"
case CategoryError:
return "CategoryError"
case CategoryTransaction:
return "CategoryTransaction"
case CategoryLog:
return "CategoryLog"
case CategoryMonitor:
return "CategoryMonitor"
case CategoryTraceMetric:
return "CategoryTraceMetric"
default:
// For unknown categories, use the original formatting logic
caser := cases.Title(language.English)
rv := "Category"
for _, w := range strings.Fields(string(c)) {
@ -44,3 +61,49 @@ func (c Category) String() string {
return rv
}
}
// Priority represents the importance level of a category for buffer management.
type Priority int

const (
    PriorityCritical Priority = iota + 1
    PriorityHigh
    PriorityMedium
    PriorityLow
    PriorityLowest
)

// String returns the lowercase human-readable name of the priority;
// any value outside the defined range maps to "unknown".
func (p Priority) String() string {
    if p < PriorityCritical || p > PriorityLowest {
        return "unknown"
    }
    // Names indexed by p-PriorityCritical, matching the const block order.
    names := [...]string{"critical", "high", "medium", "low", "lowest"}
    return names[p-PriorityCritical]
}
// GetPriority returns the buffer-management priority level for this
// category. Unknown categories default to medium priority.
func (c Category) GetPriority() Priority {
    switch c {
    case CategoryError:
        return PriorityCritical
    case CategoryMonitor:
        return PriorityHigh
    case CategoryTransaction:
        return PriorityMedium
    case CategoryLog, CategoryTraceMetric:
        return PriorityLow
    default:
        return PriorityMedium
    }
}

View File

@ -15,7 +15,7 @@ var errInvalidXSRLRetryAfter = errors.New("invalid retry-after value")
//
// Example
//
// X-Sentry-Rate-Limits: 60:transaction, 2700:default;error;security
// X-Sentry-Rate-Limits: 60:transaction, 2700:default;error;security
//
// This will rate limit transactions for the next 60 seconds and errors for the
// next 2700 seconds.

View File

@ -0,0 +1,398 @@
package telemetry
import (
"sync"
"sync/atomic"
"time"
"github.com/getsentry/sentry-go/internal/ratelimit"
)
const (
    // defaultBucketedCapacity is the total item capacity used when the
    // caller passes a non-positive capacity.
    defaultBucketedCapacity = 100
    // perBucketItemLimit caps how many items a single trace bucket may
    // hold; further items for the same trace start a new bucket.
    perBucketItemLimit = 100
)

// Bucket groups items that share a trace ID (empty traceID for items
// carrying no trace).
type Bucket[T any] struct {
    traceID       string
    items         []T
    createdAt     time.Time
    lastUpdatedAt time.Time
}

// BucketedBuffer groups items by trace id, flushing per bucket.
// Buckets live in a fixed-size ring (head/tail slot indices); traceIndex
// maps a trace ID to its bucket's ring slot. All fields are guarded by mu
// except the offered/dropped counters, which are updated atomically.
type BucketedBuffer[T any] struct {
    mu             sync.RWMutex
    buckets        []*Bucket[T]
    traceIndex     map[string]int
    head           int
    tail           int
    itemCapacity   int // max total items across all buckets
    bucketCapacity int // ring size (number of bucket slots)
    totalItems     int
    bucketCount    int
    category       ratelimit.Category
    priority       ratelimit.Priority
    overflowPolicy OverflowPolicy
    batchSize      int
    timeout        time.Duration
    lastFlushTime  time.Time
    offered        int64
    dropped        int64
    onDropped      func(item T, reason string)
}
// NewBucketedBuffer builds a trace-bucketing buffer for the given category.
// Non-positive capacity falls back to defaultBucketedCapacity, batchSize is
// floored at 1, and a negative timeout is treated as 0 (disabled).
func NewBucketedBuffer[T any](
    category ratelimit.Category,
    capacity int,
    overflowPolicy OverflowPolicy,
    batchSize int,
    timeout time.Duration,
) *BucketedBuffer[T] {
    // Normalize nonsensical configuration values to safe defaults.
    if capacity <= 0 {
        capacity = defaultBucketedCapacity
    }
    if batchSize <= 0 {
        batchSize = 1
    }
    if timeout < 0 {
        timeout = 0
    }
    // One bucket slot per ten items, but never fewer than ten slots.
    numBuckets := capacity / 10
    if numBuckets < 10 {
        numBuckets = 10
    }
    return &BucketedBuffer[T]{
        buckets:        make([]*Bucket[T], numBuckets),
        traceIndex:     make(map[string]int),
        itemCapacity:   capacity,
        bucketCapacity: numBuckets,
        category:       category,
        priority:       category.GetPriority(),
        overflowPolicy: overflowPolicy,
        batchSize:      batchSize,
        timeout:        timeout,
        lastFlushTime:  time.Now(),
    }
}
// Offer records the attempt and enqueues item into its trace's bucket,
// reporting whether the item was accepted.
func (b *BucketedBuffer[T]) Offer(item T) bool {
    atomic.AddInt64(&b.offered, 1)
    // Items that know their trace ID are grouped by it; others share the
    // empty-trace buckets.
    var traceID string
    if aware, ok := any(item).(TraceAware); ok {
        if id, found := aware.GetTraceID(); found {
            traceID = id
        }
    }
    b.mu.Lock()
    defer b.mu.Unlock()
    return b.offerToBucket(item, traceID)
}
// offerToBucket appends item to the existing bucket for traceID, or creates
// a new bucket at the ring tail. Caller must hold b.mu.
func (b *BucketedBuffer[T]) offerToBucket(item T, traceID string) bool {
    if traceID != "" {
        if idx, exists := b.traceIndex[traceID]; exists {
            bucket := b.buckets[idx]
            if len(bucket.items) >= perBucketItemLimit {
                // Bucket is full: unmap the trace so the code below
                // starts a fresh bucket for the same trace.
                delete(b.traceIndex, traceID)
            } else {
                bucket.items = append(bucket.items, item)
                bucket.lastUpdatedAt = time.Now()
                b.totalItems++
                return true
            }
        }
    }
    // Creating a new bucket; enforce the global item limit first…
    if b.totalItems >= b.itemCapacity {
        return b.handleOverflow(item, traceID)
    }
    // …then the ring-slot limit.
    if b.bucketCount >= b.bucketCapacity {
        return b.handleOverflow(item, traceID)
    }
    bucket := &Bucket[T]{
        traceID:       traceID,
        items:         []T{item},
        createdAt:     time.Now(),
        lastUpdatedAt: time.Now(),
    }
    b.buckets[b.tail] = bucket
    if traceID != "" {
        b.traceIndex[traceID] = b.tail
    }
    b.tail = (b.tail + 1) % b.bucketCapacity
    b.bucketCount++
    b.totalItems++
    return true
}
// handleOverflow resolves a full buffer according to the configured policy:
// DropOldest evicts the head bucket (counting all its items as dropped) and
// inserts a new bucket for item; DropNewest (and any unknown policy) drops
// item itself. Caller must hold b.mu. Returns whether item was accepted.
func (b *BucketedBuffer[T]) handleOverflow(item T, traceID string) bool {
    switch b.overflowPolicy {
    case OverflowPolicyDropOldest:
        oldestBucket := b.buckets[b.head]
        if oldestBucket == nil {
            // Should not happen: count says full but head slot is empty.
            atomic.AddInt64(&b.dropped, 1)
            if b.onDropped != nil {
                b.onDropped(item, "buffer_full_invalid_state")
            }
            return false
        }
        if oldestBucket.traceID != "" {
            delete(b.traceIndex, oldestBucket.traceID)
        }
        droppedCount := len(oldestBucket.items)
        atomic.AddInt64(&b.dropped, int64(droppedCount))
        if b.onDropped != nil {
            for _, di := range oldestBucket.items {
                b.onDropped(di, "buffer_full_drop_oldest_bucket")
            }
        }
        b.totalItems -= droppedCount
        b.bucketCount--
        b.head = (b.head + 1) % b.bucketCapacity
        // add new bucket
        bucket := &Bucket[T]{traceID: traceID, items: []T{item}, createdAt: time.Now(), lastUpdatedAt: time.Now()}
        b.buckets[b.tail] = bucket
        if traceID != "" {
            b.traceIndex[traceID] = b.tail
        }
        b.tail = (b.tail + 1) % b.bucketCapacity
        b.bucketCount++
        b.totalItems++
        return true
    case OverflowPolicyDropNewest:
        atomic.AddInt64(&b.dropped, 1)
        if b.onDropped != nil {
            b.onDropped(item, "buffer_full_drop_newest")
        }
        return false
    default:
        atomic.AddInt64(&b.dropped, 1)
        if b.onDropped != nil {
            b.onDropped(item, "unknown_overflow_policy")
        }
        return false
    }
}
// Poll removes and returns the oldest item from the head bucket, advancing
// the ring when the bucket empties. The second return value reports whether
// an item was available.
func (b *BucketedBuffer[T]) Poll() (T, bool) {
    b.mu.Lock()
    defer b.mu.Unlock()
    var zero T
    if b.bucketCount == 0 {
        return zero, false
    }
    bucket := b.buckets[b.head]
    if bucket == nil || len(bucket.items) == 0 {
        return zero, false
    }
    item := bucket.items[0]
    bucket.items = bucket.items[1:]
    b.totalItems--
    // Retire the head bucket once drained and advance the ring.
    if len(bucket.items) == 0 {
        if bucket.traceID != "" {
            delete(b.traceIndex, bucket.traceID)
        }
        b.buckets[b.head] = nil
        b.head = (b.head + 1) % b.bucketCapacity
        b.bucketCount--
    }
    return item, true
}
// PollBatch removes and returns up to maxItems items in FIFO order, spanning
// bucket boundaries as needed. Buckets emptied along the way are retired and
// their trace-index entries removed. Returns nil when maxItems <= 0 or the
// buffer is empty.
func (b *BucketedBuffer[T]) PollBatch(maxItems int) []T {
	if maxItems <= 0 {
		return nil
	}
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.bucketCount == 0 {
		return nil
	}
	res := make([]T, 0, maxItems)
	for len(res) < maxItems && b.bucketCount > 0 {
		bucket := b.buckets[b.head]
		if bucket == nil {
			// Defensive: head should always point at a live bucket here.
			break
		}
		// Take as much of this bucket as still fits in the batch.
		n := maxItems - len(res)
		if n > len(bucket.items) {
			n = len(bucket.items)
		}
		res = append(res, bucket.items[:n]...)
		bucket.items = bucket.items[n:]
		b.totalItems -= n
		if len(bucket.items) == 0 {
			// Bucket fully consumed: drop it and advance the ring head.
			if bucket.traceID != "" {
				delete(b.traceIndex, bucket.traceID)
			}
			b.buckets[b.head] = nil
			b.head = (b.head + 1) % b.bucketCapacity
			b.bucketCount--
		}
	}
	return res
}
// PollIfReady removes and returns the entire oldest bucket, but only when a
// flush condition holds: buffered items reached batchSize, or the timeout
// elapsed since the last flush. Returns nil otherwise. On success the flush
// clock is reset.
func (b *BucketedBuffer[T]) PollIfReady() []T {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.bucketCount == 0 {
		return nil
	}
	ready := b.totalItems >= b.batchSize || (b.timeout > 0 && time.Since(b.lastFlushTime) >= b.timeout)
	if !ready {
		return nil
	}
	oldest := b.buckets[b.head]
	if oldest == nil {
		// Defensive: bucketCount > 0 implies a live head bucket.
		return nil
	}
	// Hand the whole bucket to the caller and retire it.
	items := oldest.items
	if oldest.traceID != "" {
		delete(b.traceIndex, oldest.traceID)
	}
	b.buckets[b.head] = nil
	b.head = (b.head + 1) % b.bucketCapacity
	b.totalItems -= len(items)
	b.bucketCount--
	b.lastFlushTime = time.Now()
	return items
}
// Drain removes and returns every buffered item in FIFO bucket order and
// resets the buffer to its initial empty state.
func (b *BucketedBuffer[T]) Drain() []T {
	b.mu.Lock()
	defer b.mu.Unlock()

	if b.bucketCount == 0 {
		return nil
	}
	drained := make([]T, 0, b.totalItems)
	for offset := 0; offset < b.bucketCount; offset++ {
		slot := (b.head + offset) % b.bucketCapacity
		if bkt := b.buckets[slot]; bkt != nil {
			drained = append(drained, bkt.items...)
			b.buckets[slot] = nil
		}
	}
	// Reset all bookkeeping to the empty state.
	b.traceIndex = make(map[string]int)
	b.head, b.tail = 0, 0
	b.totalItems, b.bucketCount = 0, 0
	return drained
}
// Peek returns the item Poll would yield next, without removing it.
func (b *BucketedBuffer[T]) Peek() (T, bool) {
	b.mu.RLock()
	defer b.mu.RUnlock()

	var none T
	if b.bucketCount == 0 {
		return none, false
	}
	if head := b.buckets[b.head]; head != nil && len(head.items) > 0 {
		return head.items[0], true
	}
	return none, false
}
// Size returns the current number of buffered items.
func (b *BucketedBuffer[T]) Size() int {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.totalItems
}

// Capacity returns the maximum number of items the buffer can hold.
func (b *BucketedBuffer[T]) Capacity() int {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.itemCapacity
}

// Category returns the telemetry category this buffer serves.
func (b *BucketedBuffer[T]) Category() ratelimit.Category {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.category
}

// Priority returns the scheduling priority of this buffer.
func (b *BucketedBuffer[T]) Priority() ratelimit.Priority {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.priority
}

// IsEmpty reports whether the buffer holds no buckets.
func (b *BucketedBuffer[T]) IsEmpty() bool {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.bucketCount == 0
}

// IsFull reports whether the item capacity has been reached.
func (b *BucketedBuffer[T]) IsFull() bool {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.totalItems >= b.itemCapacity
}

// Utilization returns the fill ratio in [0, 1]; a zero capacity reports 0.
func (b *BucketedBuffer[T]) Utilization() float64 {
	b.mu.RLock()
	defer b.mu.RUnlock()
	if b.itemCapacity == 0 {
		return 0
	}
	return float64(b.totalItems) / float64(b.itemCapacity)
}

// OfferedCount returns how many items were ever offered to the buffer.
func (b *BucketedBuffer[T]) OfferedCount() int64 {
	return atomic.LoadInt64(&b.offered)
}

// DroppedCount returns how many offered items were dropped.
func (b *BucketedBuffer[T]) DroppedCount() int64 {
	return atomic.LoadInt64(&b.dropped)
}

// AcceptedCount returns how many offered items were accepted.
func (b *BucketedBuffer[T]) AcceptedCount() int64 {
	return b.OfferedCount() - b.DroppedCount()
}

// DropRate returns dropped/offered, or 0 when nothing was offered yet.
func (b *BucketedBuffer[T]) DropRate() float64 {
	offered := b.OfferedCount()
	if offered == 0 {
		return 0
	}
	return float64(b.DroppedCount()) / float64(offered)
}
// GetMetrics returns a point-in-time snapshot of buffer statistics. Size and
// utilization are read under the read lock; the monotonic counters are read
// afterwards via atomics, so the snapshot is not perfectly atomic across all
// fields.
func (b *BucketedBuffer[T]) GetMetrics() BufferMetrics {
	b.mu.RLock()
	size := b.totalItems
	util := 0.0
	if b.itemCapacity > 0 {
		util = float64(b.totalItems) / float64(b.itemCapacity)
	}
	b.mu.RUnlock()
	return BufferMetrics{Category: b.category, Priority: b.priority, Capacity: b.itemCapacity, Size: size, Utilization: util, OfferedCount: b.OfferedCount(), DroppedCount: b.DroppedCount(), AcceptedCount: b.AcceptedCount(), DropRate: b.DropRate(), LastUpdated: time.Now()}
}
// SetDroppedCallback registers a callback invoked (with the item and a reason
// string) every time an item is dropped.
func (b *BucketedBuffer[T]) SetDroppedCallback(callback func(item T, reason string)) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.onDropped = callback
}
// Clear discards every bucket and resets the buffer to its empty state.
// The dropped-item callback is not invoked and drop counters are untouched.
func (b *BucketedBuffer[T]) Clear() {
	b.mu.Lock()
	defer b.mu.Unlock()
	for i := range b.buckets {
		b.buckets[i] = nil
	}
	b.traceIndex = make(map[string]int)
	b.head, b.tail = 0, 0
	b.totalItems, b.bucketCount = 0, 0
}
// IsReadyToFlush reports whether the buffer should be flushed now: it is
// non-empty AND (batchSize has been reached OR the flush timeout elapsed).
func (b *BucketedBuffer[T]) IsReadyToFlush() bool {
	b.mu.RLock()
	defer b.mu.RUnlock()
	if b.bucketCount == 0 {
		return false
	}
	if b.totalItems >= b.batchSize {
		return true
	}
	if b.timeout > 0 && time.Since(b.lastFlushTime) >= b.timeout {
		return true
	}
	return false
}

// MarkFlushed resets the flush timer; call after flushing externally.
func (b *BucketedBuffer[T]) MarkFlushed() {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.lastFlushTime = time.Now()
}

View File

@ -0,0 +1,42 @@
package telemetry
import (
"github.com/getsentry/sentry-go/internal/ratelimit"
)
// Buffer defines the common interface for all buffer implementations
// (RingBuffer, BucketedBuffer). Both implementations guard their state with
// an internal mutex, so all methods are safe for concurrent use.
type Buffer[T any] interface {
	// Core operations
	Offer(item T) bool          // enqueue; reports whether the item was accepted
	Poll() (T, bool)            // remove and return the oldest item
	PollBatch(maxItems int) []T // remove and return up to maxItems items
	PollIfReady() []T           // remove a batch only when a flush condition holds
	Drain() []T                 // remove and return everything
	Peek() (T, bool)            // return the oldest item without removing it
	// State queries
	Size() int
	Capacity() int
	IsEmpty() bool
	IsFull() bool
	Utilization() float64 // fill ratio in [0, 1]
	// Flush management
	IsReadyToFlush() bool // batch size reached or flush timeout elapsed
	MarkFlushed()         // reset the flush timer
	// Category/Priority
	Category() ratelimit.Category
	Priority() ratelimit.Priority
	// Metrics
	OfferedCount() int64
	DroppedCount() int64
	AcceptedCount() int64
	DropRate() float64 // dropped/offered, 0 when nothing was offered
	GetMetrics() BufferMetrics
	// Configuration
	SetDroppedCallback(callback func(item T, reason string)) // invoked per dropped item
	Clear() // discard all items without invoking the drop callback
}

View File

@ -0,0 +1,49 @@
package telemetry
import (
"context"
"time"
"github.com/getsentry/sentry-go/internal/protocol"
"github.com/getsentry/sentry-go/internal/ratelimit"
)
// Processor is the top-level object that wraps the scheduler and buffers.
type Processor struct {
	scheduler *Scheduler
}

// NewProcessor creates a new Processor with the given configuration and
// immediately starts the underlying scheduler goroutine.
func NewProcessor(
	buffers map[ratelimit.Category]Buffer[protocol.TelemetryItem],
	transport protocol.TelemetryTransport,
	dsn *protocol.Dsn,
	sdkInfo *protocol.SdkInfo,
) *Processor {
	scheduler := NewScheduler(buffers, transport, dsn, sdkInfo)
	scheduler.Start()
	return &Processor{
		scheduler: scheduler,
	}
}

// Add adds a TelemetryItem to the appropriate buffer based on its category.
// It reports whether the item was accepted.
func (p *Processor) Add(item protocol.TelemetryItem) bool {
	return p.scheduler.Add(item)
}

// Flush forces all buffers to flush within the given timeout.
func (p *Processor) Flush(timeout time.Duration) bool {
	return p.scheduler.Flush(timeout)
}

// FlushWithContext flushes with a custom context for cancellation.
func (p *Processor) FlushWithContext(ctx context.Context) bool {
	return p.scheduler.FlushWithContext(ctx)
}

// Close stops the scheduler, flushing remaining data, and releases resources.
func (p *Processor) Close(timeout time.Duration) {
	p.scheduler.Stop(timeout)
}

View File

@ -0,0 +1,378 @@
package telemetry
import (
"sync"
"sync/atomic"
"time"
"github.com/getsentry/sentry-go/internal/ratelimit"
)
// defaultCapacity is used when a non-positive capacity is requested.
const defaultCapacity = 100

// RingBuffer is a thread-safe ring buffer with overflow policies.
type RingBuffer[T any] struct {
	mu       sync.RWMutex
	items    []T // backing ring storage; len(items) == capacity
	head     int // index of the oldest item
	tail     int // index of the next free slot
	size     int // number of currently buffered items
	capacity int

	category ratelimit.Category
	priority ratelimit.Priority // derived from category in the constructor

	overflowPolicy OverflowPolicy

	// Flush gating: ready when size >= batchSize, or when timeout has elapsed
	// since lastFlushTime (timeout == 0 disables time-based flushing).
	batchSize     int
	timeout       time.Duration
	lastFlushTime time.Time

	// Monotonic counters, updated atomically.
	offered int64
	dropped int64

	onDropped func(item T, reason string) // optional per-drop callback
}
// NewRingBuffer creates a ring buffer for the given category. Arguments are
// normalized: capacity <= 0 becomes defaultCapacity, batchSize <= 0 becomes 1,
// and a negative timeout becomes 0 (disabling time-based flushing). The
// buffer's priority is derived from the category.
func NewRingBuffer[T any](category ratelimit.Category, capacity int, overflowPolicy OverflowPolicy, batchSize int, timeout time.Duration) *RingBuffer[T] {
	if capacity <= 0 {
		capacity = defaultCapacity
	}
	if batchSize <= 0 {
		batchSize = 1
	}
	if timeout < 0 {
		timeout = 0
	}
	return &RingBuffer[T]{
		items:          make([]T, capacity),
		capacity:       capacity,
		category:       category,
		priority:       category.GetPriority(),
		overflowPolicy: overflowPolicy,
		batchSize:      batchSize,
		timeout:        timeout,
		lastFlushTime:  time.Now(),
	}
}
// SetDroppedCallback registers a callback invoked (with the item and a reason
// string) every time an item is dropped.
func (b *RingBuffer[T]) SetDroppedCallback(callback func(item T, reason string)) {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.onDropped = callback
}
// Offer attempts to enqueue item and reports whether it was stored. When the
// buffer is full, OverflowPolicyDropOldest overwrites the oldest item (and
// still returns true), while OverflowPolicyDropNewest — and any unknown
// policy — rejects the incoming item. Drops are counted and reported via the
// onDropped callback when set.
func (b *RingBuffer[T]) Offer(item T) bool {
	atomic.AddInt64(&b.offered, 1)
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.size < b.capacity {
		b.items[b.tail] = item
		b.tail = (b.tail + 1) % b.capacity
		b.size++
		return true
	}
	switch b.overflowPolicy {
	case OverflowPolicyDropOldest:
		// Overwrite the oldest slot; head and tail advance together so the
		// ring stays full.
		oldItem := b.items[b.head]
		b.items[b.head] = item
		b.head = (b.head + 1) % b.capacity
		b.tail = (b.tail + 1) % b.capacity
		atomic.AddInt64(&b.dropped, 1)
		if b.onDropped != nil {
			b.onDropped(oldItem, "buffer_full_drop_oldest")
		}
		return true
	case OverflowPolicyDropNewest:
		atomic.AddInt64(&b.dropped, 1)
		if b.onDropped != nil {
			b.onDropped(item, "buffer_full_drop_newest")
		}
		return false
	default:
		atomic.AddInt64(&b.dropped, 1)
		if b.onDropped != nil {
			b.onDropped(item, "unknown_overflow_policy")
		}
		return false
	}
}
// Poll removes and returns the oldest item; the bool is false when empty.
func (b *RingBuffer[T]) Poll() (T, bool) {
	b.mu.Lock()
	defer b.mu.Unlock()

	var none T
	if b.size == 0 {
		return none, false
	}
	oldest := b.items[b.head]
	b.items[b.head] = none // release the slot so the GC can reclaim the item
	b.head = (b.head + 1) % b.capacity
	b.size--
	return oldest, true
}
// PollBatch removes and returns up to maxItems of the oldest items in FIFO
// order; nil for maxItems <= 0 or an empty buffer. Vacated slots are zeroed
// so the GC can reclaim the items.
func (b *RingBuffer[T]) PollBatch(maxItems int) []T {
	if maxItems <= 0 {
		return nil
	}
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.size == 0 {
		return nil
	}
	itemCount := maxItems
	if itemCount > b.size {
		itemCount = b.size
	}
	result := make([]T, itemCount)
	var zero T
	for i := 0; i < itemCount; i++ {
		result[i] = b.items[b.head]
		b.items[b.head] = zero
		b.head = (b.head + 1) % b.capacity
		b.size--
	}
	return result
}
// Drain removes and returns all buffered items in FIFO order, zeroing the
// vacated slots and resetting the ring to empty.
func (b *RingBuffer[T]) Drain() []T {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.size == 0 {
		return nil
	}
	result := make([]T, b.size)
	index := 0
	var zero T
	for i := 0; i < b.size; i++ {
		pos := (b.head + i) % b.capacity
		result[index] = b.items[pos]
		b.items[pos] = zero
		index++
	}
	b.head = 0
	b.tail = 0
	b.size = 0
	return result
}
// Peek returns the oldest item without removing it; the bool is false when empty.
func (b *RingBuffer[T]) Peek() (T, bool) {
	b.mu.RLock()
	defer b.mu.RUnlock()
	if b.size > 0 {
		return b.items[b.head], true
	}
	var none T
	return none, false
}
// Size returns the current number of buffered items.
func (b *RingBuffer[T]) Size() int {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.size
}

// Capacity returns the maximum number of items the buffer can hold.
func (b *RingBuffer[T]) Capacity() int {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.capacity
}

// Category returns the telemetry category this buffer serves.
func (b *RingBuffer[T]) Category() ratelimit.Category {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.category
}

// Priority returns the scheduling priority of this buffer.
func (b *RingBuffer[T]) Priority() ratelimit.Priority {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.priority
}

// IsEmpty reports whether the buffer holds no items.
func (b *RingBuffer[T]) IsEmpty() bool {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.size == 0
}

// IsFull reports whether the buffer is at capacity.
func (b *RingBuffer[T]) IsFull() bool {
	b.mu.RLock()
	defer b.mu.RUnlock()
	return b.size == b.capacity
}
// Utilization returns the fill ratio in [0, 1]. A zero capacity reports 0
// rather than dividing by zero (which would yield NaN), mirroring
// BucketedBuffer.Utilization; this can only occur for a zero-value
// RingBuffer, since NewRingBuffer enforces a positive capacity.
func (b *RingBuffer[T]) Utilization() float64 {
	b.mu.RLock()
	defer b.mu.RUnlock()
	if b.capacity == 0 {
		return 0
	}
	return float64(b.size) / float64(b.capacity)
}
// OfferedCount returns how many items were ever offered to the buffer.
func (b *RingBuffer[T]) OfferedCount() int64 {
	return atomic.LoadInt64(&b.offered)
}

// DroppedCount returns how many offered items were dropped.
func (b *RingBuffer[T]) DroppedCount() int64 {
	return atomic.LoadInt64(&b.dropped)
}

// AcceptedCount returns how many offered items were accepted.
func (b *RingBuffer[T]) AcceptedCount() int64 {
	return b.OfferedCount() - b.DroppedCount()
}

// DropRate returns dropped/offered, or 0 when nothing was offered yet.
func (b *RingBuffer[T]) DropRate() float64 {
	offered := b.OfferedCount()
	if offered == 0 {
		return 0.0
	}
	return float64(b.DroppedCount()) / float64(offered)
}
// Clear zeroes every slot and resets the buffer to empty. Counters and the
// dropped-item callback are left untouched.
func (b *RingBuffer[T]) Clear() {
	b.mu.Lock()
	defer b.mu.Unlock()
	var none T
	for i := range b.items {
		b.items[i] = none
	}
	b.head, b.tail = 0, 0
	b.size = 0
}
// GetMetrics returns a point-in-time snapshot of buffer statistics. Size and
// utilization are read under the read lock; the monotonic counters are read
// afterwards via atomics, so the snapshot is not perfectly atomic across all
// fields.
func (b *RingBuffer[T]) GetMetrics() BufferMetrics {
	b.mu.RLock()
	size := b.size
	util := float64(b.size) / float64(b.capacity)
	b.mu.RUnlock()
	return BufferMetrics{
		Category:      b.category,
		Priority:      b.priority,
		Capacity:      b.capacity,
		Size:          size,
		Utilization:   util,
		OfferedCount:  b.OfferedCount(),
		DroppedCount:  b.DroppedCount(),
		AcceptedCount: b.AcceptedCount(),
		DropRate:      b.DropRate(),
		LastUpdated:   time.Now(),
	}
}
// IsReadyToFlush reports whether the buffer should be flushed now: it is
// non-empty AND (batchSize has been reached OR the flush timeout elapsed).
func (b *RingBuffer[T]) IsReadyToFlush() bool {
	b.mu.RLock()
	defer b.mu.RUnlock()
	if b.size == 0 {
		return false
	}
	if b.size >= b.batchSize {
		return true
	}
	if b.timeout > 0 && time.Since(b.lastFlushTime) >= b.timeout {
		return true
	}
	return false
}

// MarkFlushed resets the flush timer; call after flushing externally.
func (b *RingBuffer[T]) MarkFlushed() {
	b.mu.Lock()
	defer b.mu.Unlock()
	b.lastFlushTime = time.Now()
}
// PollIfReady removes and returns up to batchSize items, but only when a
// flush condition holds (size >= batchSize, or timeout elapsed since the
// last flush). Returns nil otherwise; resets the flush clock on success.
func (b *RingBuffer[T]) PollIfReady() []T {
	b.mu.Lock()
	defer b.mu.Unlock()
	if b.size == 0 {
		return nil
	}
	ready := b.size >= b.batchSize ||
		(b.timeout > 0 && time.Since(b.lastFlushTime) >= b.timeout)
	if !ready {
		return nil
	}
	// Take at most one batch; a timeout-triggered flush may yield fewer.
	itemCount := b.batchSize
	if itemCount > b.size {
		itemCount = b.size
	}
	result := make([]T, itemCount)
	var zero T
	for i := 0; i < itemCount; i++ {
		result[i] = b.items[b.head]
		b.items[b.head] = zero
		b.head = (b.head + 1) % b.capacity
		b.size--
	}
	b.lastFlushTime = time.Now()
	return result
}
// BufferMetrics is a point-in-time snapshot of a buffer's state and counters,
// as returned by GetMetrics.
type BufferMetrics struct {
	Category      ratelimit.Category `json:"category"`
	Priority      ratelimit.Priority `json:"priority"`
	Capacity      int                `json:"capacity"`
	Size          int                `json:"size"`
	Utilization   float64            `json:"utilization"` // fill ratio in [0, 1]
	OfferedCount  int64              `json:"offered_count"`
	DroppedCount  int64              `json:"dropped_count"`
	AcceptedCount int64              `json:"accepted_count"`
	DropRate      float64            `json:"drop_rate"` // dropped/offered
	LastUpdated   time.Time          `json:"last_updated"`
}
// OverflowPolicy defines how the ring buffer handles overflow.
type OverflowPolicy int

const (
	// OverflowPolicyDropOldest evicts the oldest data to make room.
	OverflowPolicyDropOldest OverflowPolicy = iota
	// OverflowPolicyDropNewest rejects the incoming item.
	OverflowPolicyDropNewest
)

// String returns a stable lowercase name for the policy.
func (op OverflowPolicy) String() string {
	if op == OverflowPolicyDropOldest {
		return "drop_oldest"
	}
	if op == OverflowPolicyDropNewest {
		return "drop_newest"
	}
	return "unknown"
}

View File

@ -0,0 +1,301 @@
package telemetry
import (
"context"
"sync"
"time"
"github.com/getsentry/sentry-go/internal/debuglog"
"github.com/getsentry/sentry-go/internal/protocol"
"github.com/getsentry/sentry-go/internal/ratelimit"
)
// Scheduler implements a weighted round-robin scheduler for processing buffered events.
type Scheduler struct {
	buffers   map[ratelimit.Category]Buffer[protocol.TelemetryItem]
	transport protocol.TelemetryTransport
	dsn       *protocol.Dsn
	sdkInfo   *protocol.SdkInfo

	// currentCycle lists each active priority repeated by its weight;
	// cyclePos walks it round-robin.
	currentCycle []ratelimit.Priority
	cyclePos     int

	ctx          context.Context
	cancel       context.CancelFunc
	processingWg sync.WaitGroup

	mu   sync.Mutex
	cond *sync.Cond // signaled on new work and by the periodic tick in run()

	startOnce  sync.Once // guards Start
	finishOnce sync.Once // guards Stop
}
// NewScheduler builds a scheduler over the given buffers. Each priority that
// has at least one buffer is entered into the round-robin cycle a number of
// times equal to its weight (critical 5 … lowest 1), so higher priorities are
// visited more often.
//
// NOTE(review): priorityWeights is iterated as a map, so the interleaving of
// priorities inside currentCycle is nondeterministic across runs; only the
// per-priority counts are fixed. Confirm this ordering is intended.
// The returned scheduler is not running; call Start.
func NewScheduler(
	buffers map[ratelimit.Category]Buffer[protocol.TelemetryItem],
	transport protocol.TelemetryTransport,
	dsn *protocol.Dsn,
	sdkInfo *protocol.SdkInfo,
) *Scheduler {
	ctx, cancel := context.WithCancel(context.Background())
	priorityWeights := map[ratelimit.Priority]int{
		ratelimit.PriorityCritical: 5,
		ratelimit.PriorityHigh:     4,
		ratelimit.PriorityMedium:   3,
		ratelimit.PriorityLow:      2,
		ratelimit.PriorityLowest:   1,
	}
	var currentCycle []ratelimit.Priority
	for priority, weight := range priorityWeights {
		// Only include priorities that at least one buffer actually uses.
		hasBuffers := false
		for _, buffer := range buffers {
			if buffer.Priority() == priority {
				hasBuffers = true
				break
			}
		}
		if hasBuffers {
			for i := 0; i < weight; i++ {
				currentCycle = append(currentCycle, priority)
			}
		}
	}
	s := &Scheduler{
		buffers:      buffers,
		transport:    transport,
		dsn:          dsn,
		sdkInfo:      sdkInfo,
		currentCycle: currentCycle,
		ctx:          ctx,
		cancel:       cancel,
	}
	s.cond = sync.NewCond(&s.mu)
	return s
}
// Start launches the scheduling goroutine exactly once; later calls are no-ops.
func (s *Scheduler) Start() {
	s.startOnce.Do(func() {
		s.processingWg.Add(1)
		go s.run()
	})
}
// Stop shuts the scheduler down at most once: it flushes outstanding data
// (bounded by timeout), cancels the run loop, wakes any waiters, then waits —
// again bounded by timeout — for the processing goroutine to exit, logging if
// it does not finish in time.
func (s *Scheduler) Stop(timeout time.Duration) {
	s.finishOnce.Do(func() {
		s.Flush(timeout)
		s.cancel()
		s.cond.Broadcast()
		done := make(chan struct{})
		go func() {
			defer close(done)
			s.processingWg.Wait()
		}()
		select {
		case <-done:
		case <-time.After(timeout):
			debuglog.Printf("scheduler stop timed out after %v", timeout)
		}
	})
}
// Signal wakes one waiter of the run loop so it re-checks for work.
func (s *Scheduler) Signal() {
	s.cond.Signal()
}
// Add routes item to the buffer registered for its category. It returns
// false when no buffer exists for the category or the buffer rejected the
// item; on success the scheduler is woken up.
func (s *Scheduler) Add(item protocol.TelemetryItem) bool {
	buffer, ok := s.buffers[item.GetCategory()]
	if !ok {
		return false
	}
	if !buffer.Offer(item) {
		return false
	}
	s.Signal()
	return true
}
// Flush force-drains every buffer and flushes the transport, bounded by timeout.
func (s *Scheduler) Flush(timeout time.Duration) bool {
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return s.FlushWithContext(ctx)
}

// FlushWithContext force-drains every buffer into the transport and then
// waits for the transport flush, returning its result; ctx bounds the wait.
func (s *Scheduler) FlushWithContext(ctx context.Context) bool {
	s.flushBuffers()
	return s.transport.FlushWithContext(ctx)
}
// run is the scheduler's main loop. A helper goroutine broadcasts on the
// condition variable every 100ms so timeout-based flushes fire even without
// new offers; the loop itself sleeps until some buffer is flush-ready (or the
// context is cancelled) and then processes one batch per iteration.
func (s *Scheduler) run() {
	defer s.processingWg.Done()
	go func() {
		ticker := time.NewTicker(100 * time.Millisecond)
		defer ticker.Stop()
		for {
			select {
			case <-ticker.C:
				s.cond.Broadcast()
			case <-s.ctx.Done():
				return
			}
		}
	}()
	for {
		s.mu.Lock()
		// Wait until there is work or the scheduler is being torn down.
		for !s.hasWork() && s.ctx.Err() == nil {
			s.cond.Wait()
		}
		if s.ctx.Err() != nil {
			s.mu.Unlock()
			return
		}
		s.mu.Unlock()
		s.processNextBatch()
	}
}
// hasWork reports whether any buffer currently satisfies its flush condition.
func (s *Scheduler) hasWork() bool {
	for _, buffer := range s.buffers {
		if buffer.IsReadyToFlush() {
			return true
		}
	}
	return false
}
// processNextBatch advances the weighted round-robin cycle one step and
// processes the first flush-ready buffer found at that step's priority.
// A priority with no ready buffer simply consumes its turn.
func (s *Scheduler) processNextBatch() {
	if len(s.currentCycle) == 0 {
		return
	}
	priority := s.currentCycle[s.cyclePos]
	s.cyclePos = (s.cyclePos + 1) % len(s.currentCycle)
	var bufferToProcess Buffer[protocol.TelemetryItem]
	var categoryToProcess ratelimit.Category
	for category, buffer := range s.buffers {
		if buffer.Priority() == priority && buffer.IsReadyToFlush() {
			bufferToProcess = buffer
			categoryToProcess = category
			break
		}
	}
	if bufferToProcess != nil {
		s.processItems(bufferToProcess, categoryToProcess, false)
	}
}
// newBatchEnvelope builds an envelope carrying a fresh event ID, the current
// timestamp, the configured SDK info and (when available) the DSN header.
// Shared by the log and trace-metric batch paths in processItems.
func (s *Scheduler) newBatchEnvelope() *protocol.Envelope {
	header := &protocol.EnvelopeHeader{EventID: protocol.GenerateEventID(), SentAt: time.Now(), Sdk: s.sdkInfo}
	if s.dsn != nil {
		header.Dsn = s.dsn.String()
	}
	return protocol.NewEnvelope(header)
}

// processItems drains (force) or conditionally polls (non-force) the buffer
// and sends the resulting items. The batch is silently discarded when empty,
// when the category is currently rate limited, or when the transport has no
// capacity. Logs and trace metrics are sent as one batched envelope each;
// every other category is sent one envelope per item.
func (s *Scheduler) processItems(buffer Buffer[protocol.TelemetryItem], category ratelimit.Category, force bool) {
	var items []protocol.TelemetryItem
	if force {
		items = buffer.Drain()
	} else {
		items = buffer.PollIfReady()
	}
	// drop the current batch if rate-limited or if transport is full
	if len(items) == 0 || s.isRateLimited(category) || !s.transport.HasCapacity() {
		return
	}
	switch category {
	case ratelimit.CategoryLog:
		item, err := protocol.Logs(items).ToEnvelopeItem()
		if err != nil {
			debuglog.Printf("error creating log batch envelope item: %v", err)
			return
		}
		envelope := s.newBatchEnvelope()
		envelope.AddItem(item)
		if err := s.transport.SendEnvelope(envelope); err != nil {
			debuglog.Printf("error sending envelope: %v", err)
		}
	case ratelimit.CategoryTraceMetric:
		item, err := protocol.Metrics(items).ToEnvelopeItem()
		if err != nil {
			debuglog.Printf("error creating trace metric batch envelope item: %v", err)
			return
		}
		envelope := s.newBatchEnvelope()
		envelope.AddItem(item)
		if err := s.transport.SendEnvelope(envelope); err != nil {
			debuglog.Printf("error sending envelope: %v", err)
		}
	default:
		// if the buffers are properly configured, buffer.PollIfReady should return a single item for every category
		// other than logs. We still iterate over the items just in case, because we don't want to send broken envelopes.
		for _, it := range items {
			convertible, ok := it.(protocol.EnvelopeItemConvertible)
			if !ok {
				debuglog.Printf("item does not implement EnvelopeItemConvertible: %T", it)
				continue
			}
			s.sendItem(convertible)
		}
	}
}
// sendItem wraps a single convertible item in its own envelope — carrying the
// item's event ID (or a fresh one when empty), its dynamic sampling context,
// the SDK info, and the DSN when configured — and hands it to the transport.
// Conversion and send errors are logged, not returned.
func (s *Scheduler) sendItem(item protocol.EnvelopeItemConvertible) {
	header := &protocol.EnvelopeHeader{
		EventID: item.GetEventID(),
		SentAt:  time.Now(),
		Trace:   item.GetDynamicSamplingContext(),
		Sdk:     s.sdkInfo,
	}
	if header.EventID == "" {
		header.EventID = protocol.GenerateEventID()
	}
	if s.dsn != nil {
		header.Dsn = s.dsn.String()
	}
	envelope := protocol.NewEnvelope(header)
	envItem, err := item.ToEnvelopeItem()
	if err != nil {
		debuglog.Printf("error while converting to envelope item: %v", err)
		return
	}
	envelope.AddItem(envItem)
	if err := s.transport.SendEnvelope(envelope); err != nil {
		debuglog.Printf("error sending envelope: %v", err)
	}
}
// flushBuffers force-drains every non-empty buffer into the transport.
func (s *Scheduler) flushBuffers() {
	for category, buffer := range s.buffers {
		if !buffer.IsEmpty() {
			s.processItems(buffer, category, true)
		}
	}
}

// isRateLimited reports whether the transport currently rate limits category.
func (s *Scheduler) isRateLimited(category ratelimit.Category) bool {
	return s.transport.IsRateLimited(category)
}

View File

@ -0,0 +1,7 @@
package telemetry
// TraceAware is implemented by items that can expose a trace ID.
// BucketedBuffer uses this to group items by trace. The bool result reports
// whether a trace ID is available for the item.
type TraceAware interface {
	GetTraceID() (string, bool)
}

View File

@ -0,0 +1,43 @@
package util
import "sync"
type SyncMap[K comparable, V any] struct {
m sync.Map
}
func (s *SyncMap[K, V]) Store(key K, value V) {
s.m.Store(key, value)
}
func (s *SyncMap[K, V]) CompareAndDelete(key K, value V) {
s.m.CompareAndDelete(key, value)
}
func (s *SyncMap[K, V]) Load(key K) (V, bool) {
v, ok := s.m.Load(key)
if !ok {
var zero V
return zero, false
}
return v.(V), true
}
func (s *SyncMap[K, V]) Delete(key K) {
s.m.Delete(key)
}
func (s *SyncMap[K, V]) LoadOrStore(key K, value V) (V, bool) {
actual, loaded := s.m.LoadOrStore(key, value)
return actual.(V), loaded
}
func (s *SyncMap[K, V]) Clear() {
s.m.Clear()
}
func (s *SyncMap[K, V]) Range(f func(key K, value V) bool) {
s.m.Range(func(key, value any) bool {
return f(key.(K), value.(V))
})
}

View File

@ -0,0 +1,83 @@
package util
import (
"fmt"
"io"
"net/http"
"github.com/getsentry/sentry-go/internal/debuglog"
"github.com/getsentry/sentry-go/internal/protocol"
)
// MaxDrainResponseBytes is the maximum number of bytes that transport
// implementations will read from response bodies when draining them.
const MaxDrainResponseBytes = 16 << 10

// HandleHTTPResponse reports whether response indicates success (2xx).
// For 4xx/5xx responses it reads at most MaxDrainResponseBytes of the body
// and logs a diagnostic tailored to the status; any other status is logged
// as unexpected. identifier names the payload in log messages.
func HandleHTTPResponse(response *http.Response, identifier string) bool {
	code := response.StatusCode
	if code >= 200 && code < 300 {
		return true
	}
	if code < 400 || code > 599 {
		debuglog.Printf("Unexpected status code %d for event %s", code, identifier)
		return false
	}
	body, err := io.ReadAll(io.LimitReader(response.Body, MaxDrainResponseBytes))
	if err != nil {
		debuglog.Printf("Error while reading response body: %v", err)
		return false
	}
	switch {
	case code == http.StatusRequestEntityTooLarge:
		debuglog.Printf("Sending %s failed because the request was too large: %s", identifier, string(body))
	case code >= 500:
		debuglog.Printf("Sending %s failed with server error %d: %s", identifier, code, string(body))
	default:
		debuglog.Printf("Sending %s failed with client error %d: %s", identifier, code, string(body))
	}
	return false
}
// EnvelopeIdentifier returns a human-readable identifier for the event to be used in log messages.
// Format: "<description> [<event-id>]".
func EnvelopeIdentifier(envelope *protocol.Envelope) string {
	if envelope == nil || len(envelope.Items) == 0 {
		return "empty envelope"
	}
	// we don't currently support mixed envelope types, so all event types would have the same type.
	itemType := envelope.Items[0].Header.Type
	// countBatched sums ItemCount over items whose header type matches
	// itemType; shared by the batched payload kinds (logs, trace metrics).
	countBatched := func() int {
		total := 0
		for _, item := range envelope.Items {
			if item != nil && item.Header != nil && item.Header.Type == itemType && item.Header.ItemCount != nil {
				total += *item.Header.ItemCount
			}
		}
		return total
	}
	var description string
	switch itemType {
	case protocol.EnvelopeItemTypeEvent:
		description = "error"
	case protocol.EnvelopeItemTypeTransaction:
		description = "transaction"
	case protocol.EnvelopeItemTypeCheckIn:
		description = "check-in"
	case protocol.EnvelopeItemTypeLog:
		description = fmt.Sprintf("%d log events", countBatched())
	case protocol.EnvelopeItemTypeTraceMetric:
		description = fmt.Sprintf("%d metric events", countBatched())
	default:
		description = fmt.Sprintf("%s event", itemType)
	}
	return fmt.Sprintf("%s [%s]", description, envelope.Header.EventID)
}

333
vendor/github.com/getsentry/sentry-go/log.go generated vendored Normal file
View File

@ -0,0 +1,333 @@
package sentry
import (
"context"
"fmt"
"maps"
"os"
"strings"
"sync"
"time"
"github.com/getsentry/sentry-go/attribute"
"github.com/getsentry/sentry-go/internal/debuglog"
)
// LogLevel is the severity label attached to a Sentry log entry.
type LogLevel string

// Supported log levels, ordered from least to most severe.
const (
	LogLevelTrace LogLevel = "trace"
	LogLevelDebug LogLevel = "debug"
	LogLevelInfo  LogLevel = "info"
	LogLevelWarn  LogLevel = "warn"
	LogLevelError LogLevel = "error"
	LogLevelFatal LogLevel = "fatal"
)

// Numeric severities corresponding to the levels above.
const (
	LogSeverityTrace   int = 1
	LogSeverityDebug   int = 5
	LogSeverityInfo    int = 9
	LogSeverityWarning int = 13
	LogSeverityError   int = 17
	LogSeverityFatal   int = 21
)
// sentryLogger is the Logger implementation that forwards entries to the
// hub's client. attributes (guarded by mu) are user-set via SetAttributes;
// defaultAttributes hold SDK/release/environment metadata captured once at
// construction.
type sentryLogger struct {
	ctx               context.Context
	hub               *Hub
	attributes        map[string]attribute.Value
	defaultAttributes map[string]attribute.Value
	mu                sync.RWMutex
}

// logEntry is a single in-flight log statement built fluently and finished
// with Emit/Emitf. shouldPanic/shouldFatal make fatal-level emission panic
// or os.Exit(1) respectively.
type logEntry struct {
	logger      *sentryLogger
	ctx         context.Context
	level       LogLevel
	severity    int
	attributes  map[string]attribute.Value
	shouldPanic bool
	shouldFatal bool
}
// NewLogger returns a Logger that emits logs to Sentry. The hub is resolved
// from ctx, falling back to the current hub. If logging is turned off (no
// client, or EnableLogs unset), all logs get discarded via a noopLogger.
func NewLogger(ctx context.Context) Logger { // nolint: dupl
	hub := GetHubFromContext(ctx)
	if hub == nil {
		hub = CurrentHub()
	}
	client := hub.Client()
	if client != nil && client.options.EnableLogs {
		// Build default attrs
		serverAddr := client.options.ServerName
		if serverAddr == "" {
			// Best effort: fall back to the host name; on error it stays empty.
			serverAddr, _ = os.Hostname()
		}
		defaults := map[string]string{
			"sentry.release":        client.options.Release,
			"sentry.environment":    client.options.Environment,
			"sentry.server.address": serverAddr,
			"sentry.sdk.name":       client.sdkIdentifier,
			"sentry.sdk.version":    client.sdkVersion,
		}
		// Only non-empty values become default attributes.
		defaultAttrs := make(map[string]attribute.Value, len(defaults))
		for k, v := range defaults {
			if v != "" {
				defaultAttrs[k] = attribute.StringValue(v)
			}
		}
		// mu is usable at its zero value and needs no explicit initialization.
		return &sentryLogger{
			ctx:               ctx,
			hub:               hub,
			attributes:        make(map[string]attribute.Value),
			defaultAttributes: defaultAttrs,
		}
	}
	debuglog.Println("fallback to noopLogger: enableLogs disabled")
	return &noopLogger{}
}
// Write implements io.Writer: each chunk, minus trailing newlines, is
// emitted as an info-level log. It always reports the full length written.
func (l *sentryLogger) Write(p []byte) (int, error) {
	msg := strings.TrimRight(string(p), "\n")
	l.Info().Emit(msg)
	return len(p), nil
}
// log assembles the final attribute set and dispatches one log record to the
// client. Attribute precedence (later wins): defaults -> scope -> instance
// (SetAttributes) -> entry-specific. When args are given, the raw message is
// recorded as a template plus one parameter attribute per arg, and the body
// is the Printf-formatted result.
// NOTE(review): with zero args the body is still fmt.Sprintf(message), so a
// literal '%' verb in a message would be mangled — confirm intended.
func (l *sentryLogger) log(ctx context.Context, level LogLevel, severity int, message string, entryAttrs map[string]attribute.Value, args ...interface{}) {
	if message == "" {
		return
	}
	// Prefer a hub from the call-site or logger context; fall back to the
	// hub captured at construction.
	hub := hubFromContexts(ctx, l.ctx)
	if hub == nil {
		hub = l.hub
	}
	client := hub.Client()
	if client == nil {
		return
	}
	scope := hub.Scope()
	traceID, spanID := resolveTrace(scope, ctx, l.ctx)
	// Pre-allocate with capacity hint to avoid map growth reallocations
	estimatedCap := len(l.defaultAttributes) + len(entryAttrs) + len(args) + 8 // scope ~3 + instance ~5
	attrs := make(map[string]attribute.Value, estimatedCap)
	// attribute precedence: default -> scope -> instance (from SetAttrs) -> entry-specific
	for k, v := range l.defaultAttributes {
		attrs[k] = v
	}
	scope.populateAttrs(attrs)
	l.mu.RLock()
	for k, v := range l.attributes {
		attrs[k] = v
	}
	l.mu.RUnlock()
	for k, v := range entryAttrs {
		attrs[k] = v
	}
	if len(args) > 0 {
		attrs["sentry.message.template"] = attribute.StringValue(message)
		for i, p := range args {
			attrs[fmt.Sprintf("sentry.message.parameters.%d", i)] = attribute.StringValue(fmt.Sprintf("%+v", p))
		}
	}
	log := &Log{
		Timestamp:  time.Now(),
		TraceID:    traceID,
		SpanID:     spanID,
		Level:      level,
		Severity:   severity,
		Body:       fmt.Sprintf(message, args...),
		Attributes: attrs,
	}
	client.captureLog(log, scope)
	if client.options.Debug {
		debuglog.Printf(message, args...)
	}
}
// SetAttributes merges attrs into the logger's instance attributes, which are
// applied to every subsequent entry. Invalid attribute values are skipped
// with a debug message.
func (l *sentryLogger) SetAttributes(attrs ...attribute.Builder) {
	l.mu.Lock()
	defer l.mu.Unlock()
	for _, a := range attrs {
		if a.Value.Type() == attribute.INVALID {
			debuglog.Printf("invalid attribute: %v", a)
			continue
		}
		l.attributes[a.Key] = a.Value
	}
}
// newEntry builds a logEntry at the given level/severity, bound to the
// logger's context with an empty attribute map. shouldFatal/shouldPanic
// select Emit's terminal behavior for fatal-level entries. Shared by all
// level constructors below, which were previously seven copies of the same
// struct literal.
func (l *sentryLogger) newEntry(level LogLevel, severity int, shouldFatal, shouldPanic bool) LogEntry {
	return &logEntry{
		logger:      l,
		ctx:         l.ctx,
		level:       level,
		severity:    severity,
		attributes:  make(map[string]attribute.Value),
		shouldFatal: shouldFatal,
		shouldPanic: shouldPanic,
	}
}

// Trace starts a trace-level entry.
func (l *sentryLogger) Trace() LogEntry {
	return l.newEntry(LogLevelTrace, LogSeverityTrace, false, false)
}

// Debug starts a debug-level entry.
func (l *sentryLogger) Debug() LogEntry {
	return l.newEntry(LogLevelDebug, LogSeverityDebug, false, false)
}

// Info starts an info-level entry.
func (l *sentryLogger) Info() LogEntry {
	return l.newEntry(LogLevelInfo, LogSeverityInfo, false, false)
}

// Warn starts a warn-level entry.
func (l *sentryLogger) Warn() LogEntry {
	return l.newEntry(LogLevelWarn, LogSeverityWarning, false, false)
}

// Error starts an error-level entry.
func (l *sentryLogger) Error() LogEntry {
	return l.newEntry(LogLevelError, LogSeverityError, false, false)
}

// Fatal starts a fatal-level entry whose Emit calls os.Exit(1).
func (l *sentryLogger) Fatal() LogEntry {
	return l.newEntry(LogLevelFatal, LogSeverityFatal, true, false)
}

// Panic starts a fatal-level entry whose Emit panics.
func (l *sentryLogger) Panic() LogEntry {
	return l.newEntry(LogLevelFatal, LogSeverityFatal, false, true)
}

// LFatal starts a fatal-level entry that neither exits nor panics.
func (l *sentryLogger) LFatal() LogEntry {
	return l.newEntry(LogLevelFatal, LogSeverityFatal, false, false)
}
// GetCtx returns the context the logger was created with.
func (l *sentryLogger) GetCtx() context.Context {
	return l.ctx
}
// WithCtx returns a copy of the entry bound to ctx. Attributes are cloned so
// the original and the copy do not share state.
func (e *logEntry) WithCtx(ctx context.Context) LogEntry {
	clone := *e
	clone.ctx = ctx
	clone.attributes = maps.Clone(e.attributes)
	return &clone
}
// String attaches a string attribute to the entry.
func (e *logEntry) String(key, value string) LogEntry {
	e.attributes[key] = attribute.StringValue(value)
	return e
}

// Int attaches an int attribute (stored as int64) to the entry.
func (e *logEntry) Int(key string, value int) LogEntry {
	e.attributes[key] = attribute.Int64Value(int64(value))
	return e
}

// Int64 attaches an int64 attribute to the entry.
func (e *logEntry) Int64(key string, value int64) LogEntry {
	e.attributes[key] = attribute.Int64Value(value)
	return e
}

// Float64 attaches a float64 attribute to the entry.
func (e *logEntry) Float64(key string, value float64) LogEntry {
	e.attributes[key] = attribute.Float64Value(value)
	return e
}

// Bool attaches a bool attribute to the entry.
func (e *logEntry) Bool(key string, value bool) LogEntry {
	e.attributes[key] = attribute.BoolValue(value)
	return e
}

// Uint64 adds uint64 attributes to the log entry.
//
// This method is intentionally not part of the LogEntry interface to avoid exposing uint64 in the public API.
func (e *logEntry) Uint64(key string, value uint64) LogEntry {
	e.attributes[key] = attribute.Uint64Value(value)
	return e
}
// Emit sends the entry using fmt.Sprint(args...) as the message. Fatal-level
// entries then panic (Panic()) or call os.Exit(1) (Fatal()). The message is
// formatted once and reused for the panic value (previously fmt.Sprint ran
// twice on the panic path).
func (e *logEntry) Emit(args ...interface{}) {
	message := fmt.Sprint(args...)
	e.logger.log(e.ctx, e.level, e.severity, message, e.attributes)
	if e.level == LogLevelFatal {
		if e.shouldPanic {
			panic(message)
		}
		if e.shouldFatal {
			os.Exit(1)
		}
	}
}

// Emitf sends the entry with Printf-style formatting; format and args are
// passed through so log can also record the template and parameters.
// Fatal-level entries then panic (Panic()) or call os.Exit(1) (Fatal()).
func (e *logEntry) Emitf(format string, args ...interface{}) {
	e.logger.log(e.ctx, e.level, e.severity, format, e.attributes, args...)
	if e.level == LogLevelFatal {
		if e.shouldPanic {
			panic(fmt.Sprintf(format, args...))
		}
		if e.shouldFatal {
			os.Exit(1)
		}
	}
}

View File

@ -0,0 +1,32 @@
package sentry
import (
"time"
)
// logBatchProcessor batches logs and sends them to Sentry.
type logBatchProcessor struct {
	*batchProcessor[Log]
}

// newLogBatchProcessor wires a batchProcessor whose flush callback wraps the
// pending logs into a single log-type event and hands it to the client's
// transport. Empty batches are ignored.
func newLogBatchProcessor(client *Client) *logBatchProcessor {
	return &logBatchProcessor{
		batchProcessor: newBatchProcessor(func(items []Log) {
			if len(items) == 0 {
				return
			}
			event := NewEvent()
			event.Timestamp = time.Now()
			event.EventID = EventID(uuid())
			event.Type = logEvent.Type
			event.Logs = items
			client.Transport.SendEvent(event)
		}),
	}
}

// Send enqueues a single log for batching, forwarding batchProcessor.Send's
// result (presumably whether the log was accepted — confirm against
// batchProcessor, defined elsewhere).
func (p *logBatchProcessor) Send(log *Log) bool {
	return p.batchProcessor.Send(*log)
}

114
vendor/github.com/getsentry/sentry-go/log_fallback.go generated vendored Normal file
View File

@ -0,0 +1,114 @@
package sentry
import (
"context"
"fmt"
"os"
"github.com/getsentry/sentry-go/attribute"
"github.com/getsentry/sentry-go/internal/debuglog"
)
// Fallback, no-op logger if logging is disabled.
type noopLogger struct{}

// noopLogEntry implements LogEntry for the no-op logger. It discards all
// attributes and messages but preserves the terminal semantics of
// fatal-level entries (panic / os.Exit).
type noopLogEntry struct {
	level       LogLevel
	shouldPanic bool
	shouldFatal bool
}
// The fluent builder methods below discard their inputs and return the same
// entry unchanged.

func (n *noopLogEntry) WithCtx(_ context.Context) LogEntry {
	return n
}
func (n *noopLogEntry) String(_, _ string) LogEntry {
	return n
}
func (n *noopLogEntry) Int(_ string, _ int) LogEntry {
	return n
}
func (n *noopLogEntry) Int64(_ string, _ int64) LogEntry {
	return n
}
func (n *noopLogEntry) Float64(_ string, _ float64) LogEntry {
	return n
}
func (n *noopLogEntry) Bool(_ string, _ bool) LogEntry {
	return n
}
func (n *noopLogEntry) Attributes(_ ...attribute.Builder) LogEntry {
	return n
}
// Emit drops the log, reporting the drop via the debug log. Fatal-level
// entries still panic or exit so that disabling logging does not change
// control flow.
//
// NOTE(review): this panics with the raw args slice, whereas Emitf and the
// real logger's Emit panic with a formatted string — confirm whether the
// payload difference is intentional.
func (n *noopLogEntry) Emit(args ...interface{}) {
	debuglog.Printf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", n.level)
	if n.level == LogLevelFatal {
		if n.shouldPanic {
			panic(args)
		}
		if n.shouldFatal {
			os.Exit(1)
		}
	}
}

// Emitf drops the log, reporting the drop via the debug log. Fatal-level
// entries still panic (with the formatted message) or exit.
func (n *noopLogEntry) Emitf(message string, args ...interface{}) {
	debuglog.Printf("Log with level=[%v] is being dropped. Turn on logging via EnableLogs", n.level)
	if n.level == LogLevelFatal {
		if n.shouldPanic {
			panic(fmt.Sprintf(message, args...))
		}
		if n.shouldFatal {
			os.Exit(1)
		}
	}
}
// GetCtx returns a background context; the no-op logger holds no context.
func (n *noopLogger) GetCtx() context.Context { return context.Background() }

// Trace returns a no-op entry at trace level.
func (*noopLogger) Trace() LogEntry {
	return &noopLogEntry{level: LogLevelTrace}
}

// Debug returns a no-op entry at debug level.
func (*noopLogger) Debug() LogEntry {
	return &noopLogEntry{level: LogLevelDebug}
}

// Info returns a no-op entry at info level.
func (*noopLogger) Info() LogEntry {
	return &noopLogEntry{level: LogLevelInfo}
}

// Warn returns a no-op entry at warn level.
func (*noopLogger) Warn() LogEntry {
	return &noopLogEntry{level: LogLevelWarn}
}

// Error returns a no-op entry at error level.
func (*noopLogger) Error() LogEntry {
	return &noopLogEntry{level: LogLevelError}
}

// Fatal returns a no-op fatal entry that still exits the process on Emit.
func (*noopLogger) Fatal() LogEntry {
	return &noopLogEntry{level: LogLevelFatal, shouldFatal: true}
}

// Panic returns a no-op fatal entry that still panics on Emit.
func (*noopLogger) Panic() LogEntry {
	return &noopLogEntry{level: LogLevelFatal, shouldPanic: true}
}

// LFatal returns a no-op fatal entry that neither panics nor exits.
func (*noopLogger) LFatal() LogEntry {
	return &noopLogEntry{level: LogLevelFatal}
}

// SetAttributes discards the attributes and reports the drop.
func (*noopLogger) SetAttributes(...attribute.Builder) {
	debuglog.Printf("No attributes attached. Turn on logging via EnableLogs")
}

// Write implements io.Writer by rejecting all writes, so anything plumbed
// into this logger as a sink fails loudly instead of silently discarding.
func (*noopLogger) Write(_ []byte) (n int, err error) {
	return 0, fmt.Errorf("log with level=[%v] is being dropped. Turn on logging via EnableLogs", LogLevelInfo)
}

View File

@ -0,0 +1,32 @@
package sentry
import (
"time"
)
// metricBatchProcessor batches metrics and sends them to Sentry.
type metricBatchProcessor struct {
	*batchProcessor[Metric]
}

// newMetricBatchProcessor returns a processor whose flush callback wraps the
// batched metrics into a single trace-metric event and hands it to the
// client's transport. Empty batches are dropped without sending anything.
func newMetricBatchProcessor(client *Client) *metricBatchProcessor {
	return &metricBatchProcessor{
		batchProcessor: newBatchProcessor(func(items []Metric) {
			if len(items) == 0 {
				return
			}
			event := NewEvent()
			event.Timestamp = time.Now()
			event.EventID = EventID(uuid())
			event.Type = traceMetricEvent.Type
			event.Metrics = items
			client.Transport.SendEvent(event)
		}),
	}
}

// Send enqueues a single metric into the batch. It dereferences the metric,
// so the caller keeps ownership of the pointer.
func (p *metricBatchProcessor) Send(metric *Metric) bool {
	return p.batchProcessor.Send(*metric)
}

241
vendor/github.com/getsentry/sentry-go/metrics.go generated vendored Normal file
View File

@ -0,0 +1,241 @@
package sentry
import (
"context"
"maps"
"os"
"sync"
"time"
"github.com/getsentry/sentry-go/attribute"
"github.com/getsentry/sentry-go/internal/debuglog"
)
// Duration Units.
//
// Well-known unit strings attached to metrics; the values are the wire
// format expected by Sentry.
const (
	UnitNanosecond  = "nanosecond"
	UnitMicrosecond = "microsecond"
	UnitMillisecond = "millisecond"
	UnitSecond      = "second"
	UnitMinute      = "minute"
	UnitHour        = "hour"
	UnitDay         = "day"
	UnitWeek        = "week"
)

// Information Units.
const (
	UnitBit      = "bit"
	UnitByte     = "byte"
	UnitKilobyte = "kilobyte"
	UnitKibibyte = "kibibyte"
	UnitMegabyte = "megabyte"
	UnitMebibyte = "mebibyte"
	UnitGigabyte = "gigabyte"
	UnitGibibyte = "gibibyte"
	UnitTerabyte = "terabyte"
	UnitTebibyte = "tebibyte"
	UnitPetabyte = "petabyte"
	UnitPebibyte = "pebibyte"
	UnitExabyte  = "exabyte"
	UnitExbibyte = "exbibyte"
)

// Fraction Units.
const (
	UnitRatio   = "ratio"
	UnitPercent = "percent"
)
// NewMeter returns a new Meter. If there is no Client bound to the current hub, or if metrics are disabled,
// it returns a no-op Meter that discards all metrics.
//
// The hub is resolved from ctx first, falling back to CurrentHub(). Default
// attributes (release, environment, server address, SDK name/version) are
// captured once here from the client options and reused on every emit.
func NewMeter(ctx context.Context) Meter {
	hub := GetHubFromContext(ctx)
	if hub == nil {
		hub = CurrentHub()
	}
	client := hub.Client()
	if client != nil && !client.options.DisableMetrics {
		// build default attrs
		serverAddr := client.options.ServerName
		if serverAddr == "" {
			// Fall back to the OS hostname; a lookup error leaves it empty,
			// which is then filtered out below.
			serverAddr, _ = os.Hostname()
		}
		defaults := map[string]string{
			"sentry.release":        client.options.Release,
			"sentry.environment":    client.options.Environment,
			"sentry.server.address": serverAddr,
			"sentry.sdk.name":       client.sdkIdentifier,
			"sentry.sdk.version":    client.sdkVersion,
		}
		// Only non-empty values become attributes.
		defaultAttrs := make(map[string]attribute.Value)
		for k, v := range defaults {
			if v != "" {
				defaultAttrs[k] = attribute.StringValue(v)
			}
		}
		return &sentryMeter{
			ctx:               ctx,
			hub:               hub,
			attributes:        make(map[string]attribute.Value),
			defaultAttributes: defaultAttrs,
			mu:                sync.RWMutex{},
		}
	}
	debuglog.Printf("fallback to noopMeter: metrics disabled")
	return &noopMeter{}
}
// sentryMeter is the active Meter implementation backed by a hub/client.
type sentryMeter struct {
	ctx context.Context // context captured at construction, used for hub/span resolution
	hub *Hub            // fallback hub when none is found on a context
	// attributes holds instance-level attributes set via SetAttributes;
	// guarded by mu.
	attributes map[string]attribute.Value
	// defaultAttributes are computed once in NewMeter and never mutated.
	defaultAttributes map[string]attribute.Value
	mu                sync.RWMutex
}
// emit assembles a Metric from the given value plus layered attributes and
// forwards it to the client. Metrics with an empty name are dropped. The hub
// is resolved from ctx, then m.ctx, then the meter's own hub; customScope,
// when non-nil, overrides the hub's scope.
func (m *sentryMeter) emit(ctx context.Context, metricType MetricType, name string, value MetricValue, unit string, attributes map[string]attribute.Value, customScope *Scope) {
	if name == "" {
		debuglog.Println("empty name provided, dropping metric")
		return
	}
	hub := hubFromContexts(ctx, m.ctx)
	if hub == nil {
		hub = m.hub
	}
	client := hub.Client()
	if client == nil {
		// No client bound: nothing to send to.
		return
	}
	scope := hub.Scope()
	if customScope != nil {
		scope = customScope
	}
	traceID, spanID := resolveTrace(scope, ctx, m.ctx)
	// Pre-allocate with capacity hint to avoid map growth reallocations
	estimatedCap := len(m.defaultAttributes) + len(attributes) + 8 // scope ~3 + call-specific ~5
	attrs := make(map[string]attribute.Value, estimatedCap)
	// attribute precedence: default -> scope -> instance (from SetAttrs) -> entry-specific
	for k, v := range m.defaultAttributes {
		attrs[k] = v
	}
	scope.populateAttrs(attrs)
	// Instance attributes are read under the read lock only for this copy.
	m.mu.RLock()
	for k, v := range m.attributes {
		attrs[k] = v
	}
	m.mu.RUnlock()
	for k, v := range attributes {
		attrs[k] = v
	}
	metric := &Metric{
		Timestamp:  time.Now(),
		TraceID:    traceID,
		SpanID:     spanID,
		Type:       metricType,
		Name:       name,
		Value:      value,
		Unit:       unit,
		Attributes: attrs,
	}
	// Only log in debug mode, and only when the client accepted the metric.
	if client.captureMetric(metric, scope) && client.options.Debug {
		debuglog.Printf("Metric %s [%s]: %v %s", metricType, name, value.AsInterface(), unit)
	}
}
// WithCtx returns a new Meter that uses the given context for trace/span association.
//
// Instance attributes are cloned so the new meter is independent of later
// SetAttributes calls on the original; defaultAttributes are shared because
// they are immutable after construction.
func (m *sentryMeter) WithCtx(ctx context.Context) Meter {
	m.mu.RLock()
	attrsCopy := maps.Clone(m.attributes)
	m.mu.RUnlock()
	return &sentryMeter{
		ctx:               ctx,
		hub:               m.hub,
		attributes:        attrsCopy,
		defaultAttributes: m.defaultAttributes,
		mu:                sync.RWMutex{},
	}
}
// applyOptions folds the functional options into a fresh meterOptions value.
func (m *sentryMeter) applyOptions(opts []MeterOption) *meterOptions {
	o := &meterOptions{}
	for _, opt := range opts {
		opt(o)
	}
	return o
}

// Count implements Meter. It records an int64 counter increment.
func (m *sentryMeter) Count(name string, count int64, opts ...MeterOption) {
	o := m.applyOptions(opts)
	m.emit(m.ctx, MetricTypeCounter, name, Int64MetricValue(count), o.unit, o.attributes, o.scope)
}

// Distribution implements Meter. It records a float64 distribution sample.
func (m *sentryMeter) Distribution(name string, sample float64, opts ...MeterOption) {
	o := m.applyOptions(opts)
	m.emit(m.ctx, MetricTypeDistribution, name, Float64MetricValue(sample), o.unit, o.attributes, o.scope)
}

// Gauge implements Meter. It records a float64 gauge value.
func (m *sentryMeter) Gauge(name string, value float64, opts ...MeterOption) {
	o := m.applyOptions(opts)
	m.emit(m.ctx, MetricTypeGauge, name, Float64MetricValue(value), o.unit, o.attributes, o.scope)
}
// SetAttributes implements Meter. It stores instance-level attributes that
// are merged into every subsequently emitted metric. Attributes with an
// INVALID value type are skipped with a debug log rather than stored.
func (m *sentryMeter) SetAttributes(attrs ...attribute.Builder) {
	m.mu.Lock()
	defer m.mu.Unlock()
	for _, a := range attrs {
		if a.Value.Type() == attribute.INVALID {
			debuglog.Printf("invalid attribute: %v", a)
			continue
		}
		m.attributes[a.Key] = a.Value
	}
}
// noopMeter is a no-operation implementation of Meter.
// This is used when there is no client available in the context or when metrics are disabled.
type noopMeter struct{}

// WithCtx implements Meter. The context is ignored.
func (n *noopMeter) WithCtx(_ context.Context) Meter {
	return n
}

// Count implements Meter. The metric is dropped with a debug log.
func (n *noopMeter) Count(name string, _ int64, _ ...MeterOption) {
	debuglog.Printf("Metric %q is being dropped. Turn on metrics by setting DisableMetrics to false", name)
}

// Distribution implements Meter. The metric is dropped with a debug log.
func (n *noopMeter) Distribution(name string, _ float64, _ ...MeterOption) {
	debuglog.Printf("Metric %q is being dropped. Turn on metrics by setting DisableMetrics to false", name)
}

// Gauge implements Meter. The metric is dropped with a debug log.
func (n *noopMeter) Gauge(name string, _ float64, _ ...MeterOption) {
	debuglog.Printf("Metric %q is being dropped. Turn on metrics by setting DisableMetrics to false", name)
}

// SetAttributes implements Meter. The attributes are discarded.
func (n *noopMeter) SetAttributes(_ ...attribute.Builder) {
	debuglog.Printf("No attributes attached. Turn on metrics by setting DisableMetrics to false")
}

79
vendor/github.com/getsentry/sentry-go/mocks.go generated vendored Normal file
View File

@ -0,0 +1,79 @@
package sentry
import (
"context"
"sync"
"time"
)
// MockScope implements [Scope] for use in tests.
type MockScope struct {
	breadcrumb      *Breadcrumb // last breadcrumb added, for test inspection
	shouldDropEvent bool        // when true, ApplyToEvent drops every event
}

// AddBreadcrumb records the breadcrumb; the limit argument is ignored.
func (scope *MockScope) AddBreadcrumb(breadcrumb *Breadcrumb, _ int) {
	scope.breadcrumb = breadcrumb
}

// ApplyToEvent returns the event unchanged, or nil when the scope is
// configured to drop events.
func (scope *MockScope) ApplyToEvent(event *Event, _ *EventHint, _ *Client) *Event {
	if scope.shouldDropEvent {
		return nil
	}
	return event
}

// MockTransport implements [Transport] for use in tests.
// It records sent events in memory; safe for concurrent use.
type MockTransport struct {
	mu        sync.Mutex
	events    []*Event
	lastEvent *Event
}

// Configure is a no-op.
func (t *MockTransport) Configure(_ ClientOptions) {}

// SendEvent appends the event to the in-memory record.
func (t *MockTransport) SendEvent(event *Event) {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.events = append(t.events, event)
	t.lastEvent = event
}

// Flush always reports success immediately.
func (t *MockTransport) Flush(_ time.Duration) bool {
	return true
}

// FlushWithContext always reports success immediately.
func (t *MockTransport) FlushWithContext(_ context.Context) bool { return true }

// Events returns the events recorded so far.
func (t *MockTransport) Events() []*Event {
	t.mu.Lock()
	defer t.mu.Unlock()
	return t.events
}

// Close is a no-op.
func (t *MockTransport) Close() {}
// MockLogEntry implements [sentry.LogEntry] for use in tests.
// Attributes records every typed attribute set on the entry; note that Int
// stores its value widened to int64.
type MockLogEntry struct {
	Attributes map[string]any
}

// NewMockLogEntry returns an entry with an initialized attribute map.
func NewMockLogEntry() *MockLogEntry {
	return &MockLogEntry{Attributes: make(map[string]any)}
}

// WithCtx ignores the context and returns the entry.
func (m *MockLogEntry) WithCtx(_ context.Context) LogEntry { return m }

// String records a string attribute.
func (m *MockLogEntry) String(key, value string) LogEntry { m.Attributes[key] = value; return m }

// Int records an int attribute, stored as int64.
func (m *MockLogEntry) Int(key string, value int) LogEntry {
	m.Attributes[key] = int64(value)
	return m
}

// Int64 records an int64 attribute.
func (m *MockLogEntry) Int64(key string, value int64) LogEntry {
	m.Attributes[key] = value
	return m
}

// Float64 records a float64 attribute.
func (m *MockLogEntry) Float64(key string, value float64) LogEntry {
	m.Attributes[key] = value
	return m
}

// Bool records a bool attribute.
func (m *MockLogEntry) Bool(key string, value bool) LogEntry {
	m.Attributes[key] = value
	return m
}

// Emit is a no-op.
func (m *MockLogEntry) Emit(...any) {}

// Emitf is a no-op.
func (m *MockLogEntry) Emitf(string, ...any) {}

View File

@ -0,0 +1,74 @@
package sentry
import (
"crypto/rand"
)
// PropagationContext holds the distributed-tracing identifiers that are
// propagated across service boundaries via the sentry-trace and baggage
// headers. The dynamic sampling context is excluded from JSON serialization.
type PropagationContext struct {
	TraceID                TraceID                `json:"trace_id"`
	SpanID                 SpanID                 `json:"span_id"`
	ParentSpanID           SpanID                 `json:"parent_span_id,omitzero"`
	DynamicSamplingContext DynamicSamplingContext `json:"-"`
}

// Map returns the context as a generic map for embedding in event payloads.
// parent_span_id is included only when it is set (non-zero).
func (p PropagationContext) Map() map[string]interface{} {
	m := map[string]interface{}{
		"trace_id": p.TraceID,
		"span_id":  p.SpanID,
	}
	if p.ParentSpanID != zeroSpanID {
		m["parent_span_id"] = p.ParentSpanID
	}
	return m
}
// NewPropagationContext returns a PropagationContext with freshly generated
// random trace and span IDs. It panics if the system's secure random source
// fails, since tracing identifiers cannot be safely fabricated otherwise.
func NewPropagationContext() PropagationContext {
	p := PropagationContext{}
	if _, err := rand.Read(p.TraceID[:]); err != nil {
		panic(err)
	}
	if _, err := rand.Read(p.SpanID[:]); err != nil {
		panic(err)
	}
	return p
}
// PropagationContextFromHeaders constructs a PropagationContext from the
// incoming sentry-trace and baggage header values.
//
// A fresh random trace/span ID pair is generated first; when the
// sentry-trace header parses successfully, its trace ID and parent span ID
// take precedence (the span ID stays freshly generated). A non-empty baggage
// header seeds the dynamic sampling context; a malformed baggage header is
// returned as an error.
func PropagationContextFromHeaders(trace, baggage string) (PropagationContext, error) {
	// NewPropagationContext already seeds both TraceID and SpanID with
	// fresh random values, so no additional randomization is needed here.
	p := NewPropagationContext()
	hasTrace := false
	if trace != "" {
		if tpc, valid := ParseTraceParentContext([]byte(trace)); valid {
			hasTrace = true
			p.TraceID = tpc.TraceID
			p.ParentSpanID = tpc.ParentSpanID
		}
	}
	if baggage != "" {
		dsc, err := DynamicSamplingContextFromHeader([]byte(baggage))
		if err != nil {
			return PropagationContext{}, err
		}
		p.DynamicSamplingContext = dsc
	}
	// In case a sentry-trace header is present but there are no sentry-related
	// values in the baggage, create an empty, frozen DynamicSamplingContext.
	if hasTrace && !p.DynamicSamplingContext.HasEntries() {
		p.DynamicSamplingContext = DynamicSamplingContext{
			Frozen: true,
		}
	}
	return p, nil
}

View File

@ -2,10 +2,14 @@ package sentry
import (
"bytes"
"context"
"io"
"net/http"
"sync"
"time"
"github.com/getsentry/sentry-go/attribute"
"github.com/getsentry/sentry-go/internal/debuglog"
)
// Scope holds contextual data for the current scope.
@ -25,13 +29,13 @@ import (
type Scope struct {
mu sync.RWMutex
breadcrumbs []*Breadcrumb
attachments []*Attachment
user User
tags map[string]string
contexts map[string]Context
extra map[string]interface{}
fingerprint []string
level Level
transaction string
request *http.Request
// requestBody holds a reference to the original request.Body.
requestBody interface {
@ -43,19 +47,22 @@ type Scope struct {
Overflow() bool
}
eventProcessors []EventProcessor
propagationContext PropagationContext
span *Span
}
// NewScope creates a new Scope.
func NewScope() *Scope {
scope := Scope{
breadcrumbs: make([]*Breadcrumb, 0),
tags: make(map[string]string),
contexts: make(map[string]Context),
extra: make(map[string]interface{}),
fingerprint: make([]string, 0),
return &Scope{
breadcrumbs: make([]*Breadcrumb, 0),
attachments: make([]*Attachment, 0),
tags: make(map[string]string),
contexts: make(map[string]Context),
extra: make(map[string]interface{}),
fingerprint: make([]string, 0),
propagationContext: NewPropagationContext(),
}
return &scope
}
// AddBreadcrumb adds new breadcrumb to the current scope
@ -82,6 +89,22 @@ func (scope *Scope) ClearBreadcrumbs() {
scope.breadcrumbs = []*Breadcrumb{}
}
// AddAttachment adds new attachment to the current scope.
func (scope *Scope) AddAttachment(attachment *Attachment) {
	scope.mu.Lock()
	defer scope.mu.Unlock()
	scope.attachments = append(scope.attachments, attachment)
}

// ClearAttachments clears all attachments from the current scope.
func (scope *Scope) ClearAttachments() {
	scope.mu.Lock()
	defer scope.mu.Unlock()
	scope.attachments = []*Attachment{}
}
// SetUser sets the user for the current scope.
func (scope *Scope) SetUser(user User) {
scope.mu.Lock()
@ -275,20 +298,28 @@ func (scope *Scope) SetLevel(level Level) {
scope.level = level
}
// SetTransaction sets the transaction name for the current transaction.
func (scope *Scope) SetTransaction(name string) {
// SetPropagationContext sets the propagation context for the current scope.
func (scope *Scope) SetPropagationContext(propagationContext PropagationContext) {
scope.mu.Lock()
defer scope.mu.Unlock()
scope.transaction = name
scope.propagationContext = propagationContext
}
// Transaction returns the transaction name for the current transaction.
func (scope *Scope) Transaction() (name string) {
// GetSpan returns the span from the current scope.
func (scope *Scope) GetSpan() *Span {
scope.mu.RLock()
defer scope.mu.RUnlock()
return scope.transaction
return scope.span
}
// SetSpan sets a span for the current scope.
func (scope *Scope) SetSpan(span *Span) {
scope.mu.Lock()
defer scope.mu.Unlock()
scope.span = span
}
// Clone returns a copy of the current scope with all data copied over.
@ -300,11 +331,13 @@ func (scope *Scope) Clone() *Scope {
clone.user = scope.user
clone.breadcrumbs = make([]*Breadcrumb, len(scope.breadcrumbs))
copy(clone.breadcrumbs, scope.breadcrumbs)
clone.attachments = make([]*Attachment, len(scope.attachments))
copy(clone.attachments, scope.attachments)
for key, value := range scope.tags {
clone.tags[key] = value
}
for key, value := range scope.contexts {
clone.contexts[key] = value
clone.contexts[key] = cloneContext(value)
}
for key, value := range scope.extra {
clone.extra[key] = value
@ -312,10 +345,11 @@ func (scope *Scope) Clone() *Scope {
clone.fingerprint = make([]string, len(scope.fingerprint))
copy(clone.fingerprint, scope.fingerprint)
clone.level = scope.level
clone.transaction = scope.transaction
clone.request = scope.request
clone.requestBody = scope.requestBody
clone.eventProcessors = scope.eventProcessors
clone.propagationContext = scope.propagationContext
clone.span = scope.span
return clone
}
@ -333,7 +367,7 @@ func (scope *Scope) AddEventProcessor(processor EventProcessor) {
}
// ApplyToEvent takes the data from the current scope and attaches it to the event.
func (scope *Scope) ApplyToEvent(event *Event, hint *EventHint) *Event {
func (scope *Scope) ApplyToEvent(event *Event, hint *EventHint, client *Client) *Event {
scope.mu.RLock()
defer scope.mu.RUnlock()
@ -341,6 +375,10 @@ func (scope *Scope) ApplyToEvent(event *Event, hint *EventHint) *Event {
event.Breadcrumbs = append(event.Breadcrumbs, scope.breadcrumbs...)
}
if len(scope.attachments) > 0 {
event.Attachments = append(event.Attachments, scope.attachments...)
}
if len(scope.tags) > 0 {
if event.Tags == nil {
event.Tags = make(map[string]string, len(scope.tags))
@ -368,11 +406,34 @@ func (scope *Scope) ApplyToEvent(event *Event, hint *EventHint) *Event {
// Ensure we are not overwriting event fields
if _, ok := event.Contexts[key]; !ok {
event.Contexts[key] = value
event.Contexts[key] = cloneContext(value)
}
}
}
if event.Contexts == nil {
event.Contexts = make(map[string]Context)
}
if scope.span != nil {
if _, ok := event.Contexts["trace"]; !ok {
event.Contexts["trace"] = scope.span.traceContext().Map()
}
transaction := scope.span.GetTransaction()
if transaction != nil {
event.sdkMetaData.dsc = DynamicSamplingContextFromTransaction(transaction)
}
} else {
event.Contexts["trace"] = scope.propagationContext.Map()
dsc := scope.propagationContext.DynamicSamplingContext
if !dsc.HasEntries() && client != nil {
dsc = DynamicSamplingContextFromScope(scope, client)
}
event.sdkMetaData.dsc = dsc
}
if len(scope.extra) > 0 {
if event.Extra == nil {
event.Extra = make(map[string]interface{}, len(scope.extra))
@ -395,10 +456,6 @@ func (scope *Scope) ApplyToEvent(event *Event, hint *EventHint) *Event {
event.Level = scope.level
}
if scope.transaction != "" {
event.Transaction = scope.transaction
}
if event.Request == nil && scope.request != nil {
event.Request = NewRequest(scope.request)
// NOTE: The SDK does not attempt to send partial request body data.
@ -419,10 +476,103 @@ func (scope *Scope) ApplyToEvent(event *Event, hint *EventHint) *Event {
id := event.EventID
event = processor(event, hint)
if event == nil {
Logger.Printf("Event dropped by one of the Scope EventProcessors: %s\n", id)
debuglog.Printf("Event dropped by one of the Scope EventProcessors: %s\n", id)
return nil
}
}
return event
}
// cloneContext returns a new context with keys and values copied from the passed one.
//
// Note: a new Context (map) is returned, but the function does NOT do
// a proper deep copy: if some context values are reference types (e.g. maps,
// slices, pointers), the clone shares them with the original.
func cloneContext(c Context) Context {
	res := make(Context, len(c))
	for k, v := range c {
		res[k] = v
	}
	return res
}
// populateAttrs writes scope-derived attributes (currently user id/name/email,
// when set) into attrs. Safe to call on a nil scope, which is a no-op.
// The caller-supplied map is mutated in place.
func (scope *Scope) populateAttrs(attrs map[string]attribute.Value) {
	if scope == nil {
		return
	}
	scope.mu.RLock()
	defer scope.mu.RUnlock()
	// Add user-related attributes
	if !scope.user.IsEmpty() {
		if scope.user.ID != "" {
			attrs["user.id"] = attribute.StringValue(scope.user.ID)
		}
		if scope.user.Name != "" {
			attrs["user.name"] = attribute.StringValue(scope.user.Name)
		}
		if scope.user.Email != "" {
			attrs["user.email"] = attribute.StringValue(scope.user.Email)
		}
	}
	// In the future, add scope.attributes here
	// for k, v := range scope.attributes {
	//	attrs[k] = v
	// }
}
// hubFromContexts returns the hub attached to the first non-nil context that
// carries one, or nil when none of the contexts has a hub.
func hubFromContexts(ctxs ...context.Context) *Hub {
	for _, c := range ctxs {
		if c == nil {
			continue
		}
		hub := GetHubFromContext(c)
		if hub != nil {
			return hub
		}
	}
	return nil
}
// resolveTrace resolves trace ID and span ID from the given scope and contexts.
//
// The resolution order follows a most-specific-to-least-specific pattern:
//  1. Check for span directly in contexts (SpanFromContext) - this is the most specific
//     source as it represents a span explicitly attached to the current operation's context
//  2. Check scope's span - provides access to span set on the hub's scope
//  3. Fall back to scope's propagation context trace ID
//
// This ordering ensures we always use the most contextually relevant tracing information.
// For example, if a specific span is active for an operation, we use that span's trace/span IDs
// rather than accidentally using a different span that might be set on the hub's scope.
//
// When no span is found anywhere, spanID is left as its zero value.
func resolveTrace(scope *Scope, ctxs ...context.Context) (traceID TraceID, spanID SpanID) {
	var span *Span
	// First non-nil span found across the contexts wins.
	for _, ctx := range ctxs {
		if ctx == nil {
			continue
		}
		if span = SpanFromContext(ctx); span != nil {
			break
		}
	}
	if scope != nil {
		// The scope's span and propagationContext are read under the scope's
		// read lock; the lock is held across the whole fallback decision.
		scope.mu.RLock()
		if span == nil {
			span = scope.span
		}
		if span != nil {
			traceID = span.TraceID
			spanID = span.SpanID
		} else {
			traceID = scope.propagationContext.TraceID
		}
		scope.mu.RUnlock()
	}
	return traceID, spanID
}

View File

@ -5,22 +5,13 @@ import (
"time"
)
// Deprecated: Use SDKVersion instead.
const Version = SDKVersion
// Version is the version of the SDK.
const SDKVersion = "0.16.0"
// The identifier of the SDK.
const SDKIdentifier = "sentry.go"
// The version of the SDK.
const SDKVersion = "0.43.0"
// apiVersion is the minimum version of the Sentry API compatible with the
// sentry-go SDK.
const apiVersion = "7"
// userAgent is the User-Agent of outgoing HTTP requests.
const userAgent = "sentry-go/" + SDKVersion
// Init initializes the SDK with options. The returned error is non-nil if
// options is invalid, for instance if a malformed DSN is provided.
func Init(options ClientOptions) error {
@ -54,6 +45,12 @@ func CaptureException(exception error) *EventID {
return hub.CaptureException(exception)
}
// CaptureCheckIn captures a (cron) monitor check-in.
func CaptureCheckIn(checkIn *CheckIn, monitorConfig *MonitorConfig) *EventID {
hub := CurrentHub()
return hub.CaptureCheckIn(checkIn, monitorConfig)
}
// CaptureEvent captures an event on the currently active client if any.
//
// The event must already be assembled. Typically code would instead use
@ -75,18 +72,17 @@ func Recover() *EventID {
// RecoverWithContext captures a panic and passes relevant context object.
func RecoverWithContext(ctx context.Context) *EventID {
if err := recover(); err != nil {
var hub *Hub
if HasHubOnContext(ctx) {
hub = GetHubFromContext(ctx)
} else {
hub = CurrentHub()
}
return hub.RecoverWithContext(ctx, err)
err := recover()
if err == nil {
return nil
}
return nil
hub := GetHubFromContext(ctx)
if hub == nil {
hub = CurrentHub()
}
return hub.RecoverWithContext(ctx, err)
}
// WithScope is a shorthand for CurrentHub().WithScope.
@ -129,6 +125,23 @@ func Flush(timeout time.Duration) bool {
return hub.Flush(timeout)
}
// FlushWithContext waits until the underlying Transport sends any buffered events
// to the Sentry server, blocking for at most the duration specified by the context.
// It returns false if the context is canceled before the events are sent. In such a case,
// some events may not be delivered.
//
// FlushWithContext should be called before terminating the program to ensure no
// events are unintentionally dropped.
//
// Avoid calling FlushWithContext indiscriminately after each call to CaptureEvent,
// CaptureException, or CaptureMessage. To send events synchronously over the network,
// configure the SDK to use HTTPSyncTransport during initialization with Init.
func FlushWithContext(ctx context.Context) bool {
hub := CurrentHub()
return hub.FlushWithContext(ctx)
}
// LastEventID returns an ID of last captured event.
func LastEventID() EventID {
hub := CurrentHub()

View File

@ -2,6 +2,8 @@ package sentry
import (
"sync"
"github.com/getsentry/sentry-go/internal/debuglog"
)
// A spanRecorder stores a span tree that makes up a transaction. Safe for
@ -17,14 +19,14 @@ type spanRecorder struct {
func (r *spanRecorder) record(s *Span) {
maxSpans := defaultMaxSpans
if client := CurrentHub().Client(); client != nil {
maxSpans = client.Options().MaxSpans
maxSpans = client.options.MaxSpans
}
r.mu.Lock()
defer r.mu.Unlock()
if len(r.spans) >= maxSpans {
r.overflowOnce.Do(func() {
root := r.spans[0]
Logger.Printf("Too many spans: dropping spans from transaction with TraceID=%s SpanID=%s limit=%d",
debuglog.Printf("Too many spans: dropping spans from transaction with TraceID=%s SpanID=%s limit=%d",
root.TraceID, root.SpanID, maxSpans)
})
// TODO(tracing): mark the transaction event in some way to

View File

@ -2,9 +2,9 @@ package sentry
import (
"go/build"
"path/filepath"
"reflect"
"runtime"
"slices"
"strings"
)
@ -32,8 +32,8 @@ func NewStacktrace() *Stacktrace {
return nil
}
frames := extractFrames(pcs[:n])
frames = filterFrames(frames)
runtimeFrames := extractFrames(pcs[:n])
frames := createFrames(runtimeFrames)
stacktrace := Stacktrace{
Frames: frames,
@ -62,8 +62,8 @@ func ExtractStacktrace(err error) *Stacktrace {
return nil
}
frames := extractFrames(pcs)
frames = filterFrames(frames)
runtimeFrames := extractFrames(pcs)
frames := createFrames(runtimeFrames)
stacktrace := Stacktrace{
Frames: frames,
@ -166,13 +166,7 @@ type Frame struct {
Symbol string `json:"symbol,omitempty"`
// Module is, despite the name, the Sentry protocol equivalent of a Go
// package's import path.
Module string `json:"module,omitempty"`
// Package is not used for Go stack trace frames. In other platforms it
// refers to a container where the Module can be found. For example, a
// Java JAR, a .NET Assembly, or a native dynamic library.
// It exists for completeness, allowing the construction and reporting
// of custom event payloads.
Package string `json:"package,omitempty"`
Module string `json:"module,omitempty"`
Filename string `json:"filename,omitempty"`
AbsPath string `json:"abs_path,omitempty"`
Lineno int `json:"lineno,omitempty"`
@ -180,40 +174,24 @@ type Frame struct {
PreContext []string `json:"pre_context,omitempty"`
ContextLine string `json:"context_line,omitempty"`
PostContext []string `json:"post_context,omitempty"`
InApp bool `json:"in_app,omitempty"`
InApp bool `json:"in_app"`
Vars map[string]interface{} `json:"vars,omitempty"`
// Package and the below are not used for Go stack trace frames. In
// other platforms it refers to a container where the Module can be
// found. For example, a Java JAR, a .NET Assembly, or a native
// dynamic library. They exists for completeness, allowing the
// construction and reporting of custom event payloads.
Package string `json:"package,omitempty"`
InstructionAddr string `json:"instruction_addr,omitempty"`
AddrMode string `json:"addr_mode,omitempty"`
SymbolAddr string `json:"symbol_addr,omitempty"`
ImageAddr string `json:"image_addr,omitempty"`
Platform string `json:"platform,omitempty"`
StackStart bool `json:"stack_start,omitempty"`
}
// NewFrame assembles a stacktrace frame out of runtime.Frame.
func NewFrame(f runtime.Frame) Frame {
var abspath, relpath string
// NOTE: f.File paths historically use forward slash as path separator even
// on Windows, though this is not yet documented, see
// https://golang.org/issues/3335. In any case, filepath.IsAbs can work with
// paths with either slash or backslash on Windows.
switch {
case f.File == "":
relpath = unknown
// Leave abspath as the empty string to be omitted when serializing
// event as JSON.
abspath = ""
case filepath.IsAbs(f.File):
abspath = f.File
// TODO: in the general case, it is not trivial to come up with a
// "project relative" path with the data we have in run time.
// We shall not use filepath.Base because it creates ambiguous paths and
// affects the "Suspect Commits" feature.
// For now, leave relpath empty to be omitted when serializing the event
// as JSON. Improve this later.
relpath = ""
default:
// f.File is a relative path. This may happen when the binary is built
// with the -trimpath flag.
relpath = f.File
// Omit abspath when serializing the event as JSON.
abspath = ""
}
function := f.Function
var pkg string
@ -221,15 +199,56 @@ func NewFrame(f runtime.Frame) Frame {
pkg, function = splitQualifiedFunctionName(function)
}
return newFrame(pkg, function, f.File, f.Line)
}
// isAbsPath reports whether path is absolute, regardless of the platform the
// code runs on. Unlike filepath.IsAbs, it recognizes both Unix-style paths
// ("/path/to/file") and Windows-style paths ("C:\path", "C:/path",
// "\\server\share") on every OS.
func isAbsPath(path string) bool {
	if len(path) == 0 {
		return false
	}
	// A leading slash or backslash covers Unix absolute paths and
	// Windows rooted/UNC paths.
	if path[0] == '/' || path[0] == '\\' {
		return true
	}
	// Windows drive-letter path, e.g. "C:\..." or "C:/...", see
	// https://learn.microsoft.com/en-us/dotnet/standard/io/file-path-formats
	if len(path) >= 3 && path[1] == ':' && (path[2] == '/' || path[2] == '\\') {
		return true
	}
	return false
}
func newFrame(module string, function string, file string, line int) Frame {
frame := Frame{
AbsPath: abspath,
Filename: relpath,
Lineno: f.Line,
Module: pkg,
Lineno: line,
Module: module,
Function: function,
}
frame.InApp = isInAppFrame(frame)
switch {
case len(file) == 0:
frame.Filename = unknown
// Leave abspath as the empty string to be omitted when serializing event as JSON.
case isAbsPath(file):
frame.AbsPath = file
// TODO: in the general case, it is not trivial to come up with a
// "project relative" path with the data we have in run time.
// We shall not use filepath.Base because it creates ambiguous paths and
// affects the "Suspect Commits" feature.
// For now, leave relpath empty to be omitted when serializing the event
// as JSON. Improve this later.
default:
// f.File is a relative path. This may happen when the binary is built
// with the -trimpath flag.
frame.Filename = file
// Omit abspath when serializing the event as JSON.
}
setInAppFrame(&frame)
return frame
}
@ -239,67 +258,85 @@ func NewFrame(f runtime.Frame) Frame {
// runtime.Frame.Function values.
func splitQualifiedFunctionName(name string) (pkg string, fun string) {
pkg = packageName(name)
fun = strings.TrimPrefix(name, pkg+".")
if len(pkg) > 0 {
fun = name[len(pkg)+1:]
}
return
}
func extractFrames(pcs []uintptr) []Frame {
var frames = make([]Frame, 0, len(pcs))
func extractFrames(pcs []uintptr) []runtime.Frame {
var frames = make([]runtime.Frame, 0, len(pcs))
callersFrames := runtime.CallersFrames(pcs)
for {
callerFrame, more := callersFrames.Next()
frames = append(frames, NewFrame(callerFrame))
frames = append(frames, callerFrame)
if !more {
break
}
}
// reverse
for i, j := 0, len(frames)-1; i < j; i, j = i+1, j-1 {
frames[i], frames[j] = frames[j], frames[i]
}
slices.Reverse(frames)
return frames
}
// filterFrames filters out stack frames that are not meant to be reported to
// Sentry. Those are frames internal to the SDK or Go.
func filterFrames(frames []Frame) []Frame {
// createFrames creates Frame objects while filtering out frames that are not
// meant to be reported to Sentry, those are frames internal to the SDK or Go.
func createFrames(frames []runtime.Frame) []Frame {
if len(frames) == 0 {
return nil
}
// reuse
filteredFrames := frames[:0]
result := make([]Frame, 0, len(frames))
for _, frame := range frames {
// Skip Go internal frames.
if frame.Module == "runtime" || frame.Module == "testing" {
continue
function := frame.Function
var pkg string
if function != "" {
pkg, function = splitQualifiedFunctionName(function)
}
// Skip Sentry internal frames, except for frames in _test packages (for
// testing).
if strings.HasPrefix(frame.Module, "github.com/getsentry/sentry-go") &&
!strings.HasSuffix(frame.Module, "_test") {
continue
if !shouldSkipFrame(pkg) {
result = append(result, newFrame(pkg, function, frame.File, frame.Line))
}
filteredFrames = append(filteredFrames, frame)
}
return filteredFrames
// Fix issues grouping errors with the new fully qualified function names
// introduced from Go 1.21
result = cleanupFunctionNamePrefix(result)
return result
}
func isInAppFrame(frame Frame) bool {
if strings.HasPrefix(frame.AbsPath, build.Default.GOROOT) ||
strings.Contains(frame.Module, "vendor") ||
strings.Contains(frame.Module, "third_party") {
return false
// TODO ID: why do we want to do this?
// I'm not aware of other SDKs skipping all Sentry frames, regardless of their position in the stactrace.
// For example, in the .NET SDK, only the first frames are skipped until the call to the SDK.
// As is, this will also hide any intermediate frames in the stack and make debugging issues harder.
func shouldSkipFrame(module string) bool {
// Skip Go internal frames.
if module == "runtime" || module == "testing" {
return true
}
return true
// Skip Sentry internal frames, except for frames in _test packages (for testing).
if strings.HasPrefix(module, "github.com/getsentry/sentry-go") &&
!strings.HasSuffix(module, "_test") {
return true
}
return false
}
// On Windows, GOROOT has backslashes, but we want forward slashes.
var goRoot = strings.ReplaceAll(build.Default.GOROOT, "\\", "/")
// setInAppFrame marks frame as in-app unless its path points into the Go
// standard library (GOROOT) or its module looks vendored / third-party.
func setInAppFrame(frame *Frame) {
	fromGoRoot := strings.HasPrefix(frame.AbsPath, goRoot)
	fromVendored := strings.Contains(frame.Module, "vendor") ||
		strings.Contains(frame.Module, "third_party")
	frame.InApp = !fromGoRoot && !fromVendored
}
func callerFunctionName() string {
@ -315,9 +352,7 @@ func callerFunctionName() string {
// It replicates https://golang.org/pkg/debug/gosym/#Sym.PackageName, avoiding a
// dependency on debug/gosym.
func packageName(name string) string {
// A prefix of "type." and "go." is a compiler-generated symbol that doesn't belong to any package.
// See variable reservedimports in cmd/compile/internal/gc/subr.go
if strings.HasPrefix(name, "go.") || strings.HasPrefix(name, "type.") {
if isCompilerGeneratedSymbol(name) {
return ""
}
@ -341,3 +376,32 @@ func baseName(name string) string {
}
return name
}
// isCompilerGeneratedSymbol reports whether name is a compiler-generated
// symbol rather than a function belonging to a package.
//
// In versions of Go 1.20 and above a prefix of "type:" and "go:" is a
// compiler-generated symbol that doesn't belong to any package.
// See variable reservedimports in cmd/compile/internal/gc/subr.go
func isCompilerGeneratedSymbol(name string) bool {
	return strings.HasPrefix(name, "go:") || strings.HasPrefix(name, "type:")
}
// cleanupFunctionNamePrefix walks backwards through the frames and, for each
// function name, strips its parent function's prefix, leaving only its actual
// name. This fixes issues grouping errors with the new fully qualified
// function names introduced from Go 1.21.
func cleanupFunctionNamePrefix(f []Frame) []Frame {
	for i := len(f) - 1; i > 0; i-- {
		prefix := f[i-1].Function + "."
		// TrimPrefix returns the name unchanged when the prefix is absent.
		f[i].Function = strings.TrimPrefix(f[i].Function, prefix)
	}
	return f
}

File diff suppressed because it is too large Load Diff

View File

@ -2,6 +2,7 @@ package sentry
import (
"bytes"
"context"
"crypto/tls"
"encoding/json"
"errors"
@ -12,28 +13,25 @@ import (
"sync"
"time"
"github.com/getsentry/sentry-go/internal/debuglog"
httpinternal "github.com/getsentry/sentry-go/internal/http"
"github.com/getsentry/sentry-go/internal/protocol"
"github.com/getsentry/sentry-go/internal/ratelimit"
"github.com/getsentry/sentry-go/internal/util"
)
const defaultBufferSize = 30
const defaultTimeout = time.Second * 30
// maxDrainResponseBytes is the maximum number of bytes that transport
// implementations will read from response bodies when draining them.
//
// Sentry's ingestion API responses are typically short and the SDK doesn't need
// the contents of the response body. However, the net/http HTTP client requires
// response bodies to be fully drained (and closed) for TCP keep-alive to work.
//
// maxDrainResponseBytes strikes a balance between reading too much data (if the
// server is misbehaving) and reusing TCP connections.
const maxDrainResponseBytes = 16 << 10
const (
defaultBufferSize = 1000
defaultTimeout = time.Second * 30
)
// Transport is used by the Client to deliver events to remote server.
type Transport interface {
Flush(timeout time.Duration) bool
FlushWithContext(ctx context.Context) bool
Configure(options ClientOptions)
SendEvent(event *Event)
Close()
}
func getProxyConfig(options ClientOptions) func(*http.Request) (*url.URL, error) {
@ -82,19 +80,101 @@ func getRequestBodyFromEvent(event *Event) []byte {
}
body, err = json.Marshal(event)
if err == nil {
Logger.Println(msg)
debuglog.Println(msg)
return body
}
// This should _only_ happen when Event.Exception[0].Stacktrace.Frames[0].Vars is unserializable
// Which won't ever happen, as we don't use it now (although it's the part of public interface accepted by Sentry)
// Juuust in case something, somehow goes utterly wrong.
Logger.Println("Event couldn't be marshaled, even with stripped contextual data. Skipping delivery. " +
debuglog.Println("Event couldn't be marshaled, even with stripped contextual data. Skipping delivery. " +
"Please notify the SDK owners with possibly broken payload.")
return nil
}
func transactionEnvelopeFromBody(event *Event, dsn *Dsn, sentAt time.Time, body json.RawMessage) (*bytes.Buffer, error) {
// encodeAttachment writes a single attachment envelope item: a JSON item
// header followed by the raw payload and a terminating newline.
func encodeAttachment(enc *json.Encoder, b io.Writer, attachment *Attachment) error {
	// Attachment item header.
	header := struct {
		Type        string `json:"type"`
		Length      int    `json:"length"`
		Filename    string `json:"filename"`
		ContentType string `json:"content_type,omitempty"`
	}{
		Type:        "attachment",
		Length:      len(attachment.Payload),
		Filename:    attachment.Filename,
		ContentType: attachment.ContentType,
	}
	if err := enc.Encode(header); err != nil {
		return err
	}
	// Raw attachment payload follows the header.
	if _, err := b.Write(attachment.Payload); err != nil {
		return err
	}
	// "Envelopes should be terminated with a trailing newline."
	//
	// [1]: https://develop.sentry.dev/sdk/envelopes/#envelopes
	_, err := b.Write([]byte("\n"))
	return err
}
func encodeEnvelopeItem(enc *json.Encoder, itemType string, body json.RawMessage) error {
// Item header
err := enc.Encode(struct {
Type string `json:"type"`
Length int `json:"length"`
}{
Type: itemType,
Length: len(body),
})
if err == nil {
// payload
err = enc.Encode(body)
}
return err
}
// encodeEnvelopeLogs writes a log container envelope item: a JSON header
// carrying the item count and content type, followed by the payload.
func encodeEnvelopeLogs(enc *json.Encoder, count int, body json.RawMessage) error {
	header := struct {
		Type        string `json:"type"`
		ItemCount   int    `json:"item_count"`
		ContentType string `json:"content_type"`
	}{
		Type:        logEvent.Type,
		ItemCount:   count,
		ContentType: logEvent.ContentType,
	}
	if err := enc.Encode(header); err != nil {
		return err
	}
	return enc.Encode(body)
}
// encodeEnvelopeMetrics writes a trace-metric container envelope item: a JSON
// header carrying the item count and content type, followed by the payload.
func encodeEnvelopeMetrics(enc *json.Encoder, count int, body json.RawMessage) error {
	header := struct {
		Type        string `json:"type"`
		ItemCount   int    `json:"item_count"`
		ContentType string `json:"content_type"`
	}{
		Type:        traceMetricEvent.Type,
		ItemCount:   count,
		ContentType: traceMetricEvent.ContentType,
	}
	if err := enc.Encode(header); err != nil {
		return err
	}
	return enc.Encode(body)
}
func envelopeFromBody(event *Event, dsn *Dsn, sentAt time.Time, body json.RawMessage) (*bytes.Buffer, error) {
var b bytes.Buffer
enc := json.NewEncoder(&b)
@ -127,63 +207,71 @@ func transactionEnvelopeFromBody(event *Event, dsn *Dsn, sentAt time.Time, body
return nil, err
}
// Item header
err = enc.Encode(struct {
Type string `json:"type"`
Length int `json:"length"`
}{
Type: transactionType,
Length: len(body),
})
switch event.Type {
case transactionType, checkInType:
err = encodeEnvelopeItem(enc, event.Type, body)
case logEvent.Type:
err = encodeEnvelopeLogs(enc, len(event.Logs), body)
case traceMetricEvent.Type:
err = encodeEnvelopeMetrics(enc, len(event.Metrics), body)
default:
err = encodeEnvelopeItem(enc, eventType, body)
}
if err != nil {
return nil, err
}
// payload
err = enc.Encode(body)
if err != nil {
return nil, err
// Attachments
for _, attachment := range event.Attachments {
if err := encodeAttachment(enc, &b, attachment); err != nil {
return nil, err
}
}
return &b, nil
}
func getRequestFromEvent(event *Event, dsn *Dsn) (r *http.Request, err error) {
func getRequestFromEvent(ctx context.Context, event *Event, dsn *Dsn) (r *http.Request, err error) {
defer func() {
if r != nil {
r.Header.Set("User-Agent", userAgent)
r.Header.Set("User-Agent", fmt.Sprintf("%s/%s", event.Sdk.Name, event.Sdk.Version))
r.Header.Set("Content-Type", "application/x-sentry-envelope")
auth := fmt.Sprintf("Sentry sentry_version=%s, "+
"sentry_client=%s/%s, sentry_key=%s", apiVersion, event.Sdk.Name, event.Sdk.Version, dsn.GetPublicKey())
// The key sentry_secret is effectively deprecated and no longer needs to be set.
// However, since it was required in older self-hosted versions,
// it should still passed through to Sentry if set.
if dsn.GetSecretKey() != "" {
auth = fmt.Sprintf("%s, sentry_secret=%s", auth, dsn.GetSecretKey())
}
r.Header.Set("X-Sentry-Auth", auth)
}
}()
body := getRequestBodyFromEvent(event)
if body == nil {
return nil, errors.New("event could not be marshaled")
}
if event.Type == transactionType {
b, err := transactionEnvelopeFromBody(event, dsn, time.Now(), body)
if err != nil {
return nil, err
}
return http.NewRequest(
http.MethodPost,
dsn.EnvelopeAPIURL().String(),
b,
)
}
return http.NewRequest(
http.MethodPost,
dsn.StoreAPIURL().String(),
bytes.NewReader(body),
)
}
func categoryFor(eventType string) ratelimit.Category {
switch eventType {
case "":
return ratelimit.CategoryError
case transactionType:
return ratelimit.CategoryTransaction
default:
return ratelimit.Category(eventType)
envelope, err := envelopeFromBody(event, dsn, time.Now(), body)
if err != nil {
return nil, err
}
if ctx == nil {
ctx = context.Background()
}
return http.NewRequestWithContext(
ctx,
http.MethodPost,
dsn.GetAPIURL().String(),
envelope,
)
}
// ================================
@ -198,8 +286,9 @@ type batch struct {
}
type batchItem struct {
request *http.Request
category ratelimit.Category
request *http.Request
category ratelimit.Category
eventIdentifier string
}
// HTTPTransport is the default, non-blocking, implementation of Transport.
@ -216,7 +305,8 @@ type HTTPTransport struct {
// current in-flight items and starts a new batch for subsequent events.
buffer chan batch
start sync.Once
startOnce sync.Once
closeOnce sync.Once
// Size of the transport buffer. Defaults to 30.
BufferSize int
@ -225,6 +315,9 @@ type HTTPTransport struct {
mu sync.RWMutex
limits ratelimit.Map
// receiving signal will terminate worker.
done chan struct{}
}
// NewHTTPTransport returns a new pre-configured instance of HTTPTransport.
@ -232,7 +325,7 @@ func NewHTTPTransport() *HTTPTransport {
transport := HTTPTransport{
BufferSize: defaultBufferSize,
Timeout: defaultTimeout,
limits: make(ratelimit.Map),
done: make(chan struct{}),
}
return &transport
}
@ -241,7 +334,7 @@ func NewHTTPTransport() *HTTPTransport {
func (t *HTTPTransport) Configure(options ClientOptions) {
dsn, err := NewDsn(options.Dsn)
if err != nil {
Logger.Printf("%v\n", err)
debuglog.Printf("%v\n", err)
return
}
t.dsn = dsn
@ -274,32 +367,33 @@ func (t *HTTPTransport) Configure(options ClientOptions) {
}
}
t.start.Do(func() {
t.startOnce.Do(func() {
go t.worker()
})
}
// SendEvent assembles a new packet out of Event and sends it to remote server.
// SendEvent assembles a new packet out of Event and sends it to the remote server.
func (t *HTTPTransport) SendEvent(event *Event) {
t.SendEventWithContext(context.Background(), event)
}
// SendEventWithContext assembles a new packet out of Event and sends it to the remote server.
func (t *HTTPTransport) SendEventWithContext(ctx context.Context, event *Event) {
if t.dsn == nil {
return
}
category := categoryFor(event.Type)
category := event.toCategory()
if t.disabled(category) {
return
}
request, err := getRequestFromEvent(event, t.dsn)
request, err := getRequestFromEvent(ctx, event, t.dsn)
if err != nil {
return
}
for headerKey, headerValue := range t.dsn.RequestHeaders() {
request.Header.Set(headerKey, headerValue)
}
// <-t.buffer is equivalent to acquiring a lock to access the current batch.
// A few lines below, t.buffer <- b releases the lock.
//
@ -313,26 +407,22 @@ func (t *HTTPTransport) SendEvent(event *Event) {
// channel (used as a queue).
b := <-t.buffer
identifier := eventIdentifier(event)
select {
case b.items <- batchItem{
request: request,
category: category,
request: request,
category: category,
eventIdentifier: identifier,
}:
var eventType string
if event.Type == transactionType {
eventType = "transaction"
} else {
eventType = fmt.Sprintf("%s event", event.Level)
}
Logger.Printf(
"Sending %s [%s] to %s project: %s",
eventType,
event.EventID,
t.dsn.host,
t.dsn.projectID,
debuglog.Printf(
"Sending %s to %s project: %s",
identifier,
t.dsn.GetHost(),
t.dsn.GetProjectID(),
)
default:
Logger.Println("Event dropped due to transport buffer being full.")
debuglog.Println("Event dropped due to transport buffer being full.")
}
t.buffer <- b
@ -349,8 +439,17 @@ func (t *HTTPTransport) SendEvent(event *Event) {
// have the SDK send events over the network synchronously, configure it to use
// the HTTPSyncTransport in the call to Init.
// Flush waits until any buffered events are sent to the Sentry server, blocking
// for at most the given timeout, and returns false if the timeout was reached.
// It delegates to FlushWithContext with a deadline-bound context.
func (t *HTTPTransport) Flush(timeout time.Duration) bool {
	// NOTE: a leftover `toolate := time.After(timeout)` line was removed here;
	// a declared-and-unused variable does not compile in Go.
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()
	return t.FlushWithContext(ctx)
}
// FlushWithContext works like Flush, but it accepts a context.Context instead
// of a timeout. It returns false when the context is done before the current
// buffer has been flushed.
func (t *HTTPTransport) FlushWithContext(ctx context.Context) bool {
	return t.flushInternal(ctx.Done())
}
func (t *HTTPTransport) flushInternal(timeout <-chan struct{}) bool {
// Wait until processing the current batch has started or the timeout.
//
// We must wait until the worker has seen the current batch, because it is
@ -358,6 +457,7 @@ func (t *HTTPTransport) Flush(timeout time.Duration) bool {
// possible execution flow in which b.done is never closed, and the only way
// out of Flush would be waiting for the timeout, which is undesired.
var b batch
for {
select {
case b = <-t.buffer:
@ -367,7 +467,7 @@ func (t *HTTPTransport) Flush(timeout time.Duration) bool {
default:
t.buffer <- b
}
case <-toolate:
case <-timeout:
goto fail
}
}
@ -386,17 +486,28 @@ started:
// Wait until the current batch is done or the timeout.
select {
case <-b.done:
Logger.Println("Buffer flushed successfully.")
debuglog.Println("Buffer flushed successfully.")
return true
case <-toolate:
case <-timeout:
goto fail
}
fail:
Logger.Println("Buffer flushing reached the timeout.")
debuglog.Println("Buffer flushing was canceled or timed out.")
return false
}
// Close terminates the event-sending loop by closing t.done, which the worker
// goroutine watches. Closing is idempotent thanks to closeOnce.
// It is useful to prevent goroutine leaks when multiple HTTPTransport
// instances are created.
//
// Close should be called after Flush and before terminating the program,
// otherwise some events may be lost.
func (t *HTTPTransport) Close() {
	t.closeOnce.Do(func() {
		close(t.done)
	})
}
func (t *HTTPTransport) worker() {
for b := range t.buffer {
// Signal that processing of the current batch has started.
@ -407,23 +518,38 @@ func (t *HTTPTransport) worker() {
t.buffer <- b
// Process all batch items.
for item := range b.items {
if t.disabled(item.category) {
continue
}
loop:
for {
select {
case <-t.done:
return
case item, open := <-b.items:
if !open {
break loop
}
if t.disabled(item.category) {
continue
}
response, err := t.client.Do(item.request)
if err != nil {
Logger.Printf("There was an issue with sending an event: %v", err)
continue
response, err := t.client.Do(item.request)
if err != nil {
debuglog.Printf("There was an issue with sending an event: %v", err)
continue
}
util.HandleHTTPResponse(response, item.eventIdentifier)
t.mu.Lock()
if t.limits == nil {
t.limits = make(ratelimit.Map)
}
t.limits.Merge(ratelimit.FromResponse(response))
t.mu.Unlock()
// Drain body up to a limit and close it, allowing the
// transport to reuse TCP connections.
_, _ = io.CopyN(io.Discard, response.Body, util.MaxDrainResponseBytes)
response.Body.Close()
}
t.mu.Lock()
t.limits.Merge(ratelimit.FromResponse(response))
t.mu.Unlock()
// Drain body up to a limit and close it, allowing the
// transport to reuse TCP connections.
_, _ = io.CopyN(io.Discard, response.Body, maxDrainResponseBytes)
response.Body.Close()
}
// Signal that processing of the batch is done.
@ -436,7 +562,7 @@ func (t *HTTPTransport) disabled(c ratelimit.Category) bool {
defer t.mu.RUnlock()
disabled := t.limits.IsRateLimited(c)
if disabled {
Logger.Printf("Too many requests for %q, backing off till: %v", c, t.limits.Deadline(c))
debuglog.Printf("Too many requests for %q, backing off till: %v", c, t.limits.Deadline(c))
}
return disabled
}
@ -482,7 +608,7 @@ func NewHTTPSyncTransport() *HTTPSyncTransport {
func (t *HTTPSyncTransport) Configure(options ClientOptions) {
dsn, err := NewDsn(options.Dsn)
if err != nil {
Logger.Printf("%v\n", err)
debuglog.Printf("%v\n", err)
return
}
t.dsn = dsn
@ -506,51 +632,54 @@ func (t *HTTPSyncTransport) Configure(options ClientOptions) {
}
}
// SendEvent assembles a new packet out of Event and sends it to remote server.
// SendEvent assembles a new packet out of Event and sends it to the remote server.
func (t *HTTPSyncTransport) SendEvent(event *Event) {
t.SendEventWithContext(context.Background(), event)
}
func (t *HTTPSyncTransport) Close() {}
// SendEventWithContext assembles a new packet out of Event and sends it to the remote server.
func (t *HTTPSyncTransport) SendEventWithContext(ctx context.Context, event *Event) {
if t.dsn == nil {
return
}
if t.disabled(categoryFor(event.Type)) {
if t.disabled(event.toCategory()) {
return
}
request, err := getRequestFromEvent(event, t.dsn)
request, err := getRequestFromEvent(ctx, event, t.dsn)
if err != nil {
return
}
for headerKey, headerValue := range t.dsn.RequestHeaders() {
request.Header.Set(headerKey, headerValue)
}
var eventType string
if event.Type == transactionType {
eventType = "transaction"
} else {
eventType = fmt.Sprintf("%s event", event.Level)
}
Logger.Printf(
"Sending %s [%s] to %s project: %s",
eventType,
event.EventID,
t.dsn.host,
t.dsn.projectID,
identifier := eventIdentifier(event)
debuglog.Printf(
"Sending %s to %s project: %s",
identifier,
t.dsn.GetHost(),
t.dsn.GetProjectID(),
)
response, err := t.client.Do(request)
if err != nil {
Logger.Printf("There was an issue with sending an event: %v", err)
debuglog.Printf("There was an issue with sending an event: %v", err)
return
}
util.HandleHTTPResponse(response, identifier)
t.mu.Lock()
if t.limits == nil {
t.limits = make(ratelimit.Map)
}
t.limits.Merge(ratelimit.FromResponse(response))
t.mu.Unlock()
// Drain body up to a limit and close it, allowing the
// transport to reuse TCP connections.
_, _ = io.CopyN(io.Discard, response.Body, maxDrainResponseBytes)
_, _ = io.CopyN(io.Discard, response.Body, util.MaxDrainResponseBytes)
response.Body.Close()
}
@ -559,12 +688,17 @@ func (t *HTTPSyncTransport) Flush(_ time.Duration) bool {
return true
}
// FlushWithContext is a no-op for HTTPSyncTransport. It always returns true immediately.
func (t *HTTPSyncTransport) FlushWithContext(_ context.Context) bool {
return true
}
func (t *HTTPSyncTransport) disabled(c ratelimit.Category) bool {
t.mu.Lock()
defer t.mu.Unlock()
disabled := t.limits.IsRateLimited(c)
if disabled {
Logger.Printf("Too many requests for %q, backing off till: %v", c, t.limits.Deadline(c))
debuglog.Printf("Too many requests for %q, backing off till: %v", c, t.limits.Deadline(c))
}
return disabled
}
@ -580,13 +714,98 @@ type noopTransport struct{}
var _ Transport = noopTransport{}
func (noopTransport) Configure(ClientOptions) {
Logger.Println("Sentry client initialized with an empty DSN. Using noopTransport. No events will be delivered.")
debuglog.Println("Sentry client initialized with an empty DSN. Using noopTransport. No events will be delivered.")
}
func (noopTransport) SendEvent(*Event) {
Logger.Println("Event dropped due to noopTransport usage.")
debuglog.Println("Event dropped due to noopTransport usage.")
}
// Flush is a no-op for noopTransport; there is nothing buffered to send.
func (noopTransport) Flush(time.Duration) bool {
	return true
}
// FlushWithContext is a no-op for noopTransport; there is nothing buffered to send.
func (noopTransport) FlushWithContext(context.Context) bool {
	return true
}
// Close is a no-op for noopTransport; it holds no resources.
func (noopTransport) Close() {}
// ================================
// Internal Transport Adapters
// ================================
// newInternalAsyncTransport creates a new AsyncTransport from internal/http
// wrapped to satisfy the Transport interface.
//
// This is not yet exposed in the public API and is for internal experimentation.
func newInternalAsyncTransport() Transport {
return &internalAsyncTransportAdapter{}
}
// internalAsyncTransportAdapter wraps the internal AsyncTransport to implement
// the root-level Transport interface.
type internalAsyncTransportAdapter struct {
	// transport is the underlying async transport that performs the sends.
	transport protocol.TelemetryTransport
	// dsn is the parsed DSN; nil when the configured DSN was empty or invalid.
	dsn *protocol.Dsn
}
// Configure builds the internal AsyncTransport from the client options and,
// when a DSN is provided, parses it so envelope headers can carry it later.
// A DSN parse failure is logged and leaves a.dsn nil rather than failing.
func (a *internalAsyncTransportAdapter) Configure(options ClientOptions) {
	// Forward only the HTTP-related options the internal transport understands.
	transportOptions := httpinternal.TransportOptions{
		Dsn:           options.Dsn,
		HTTPClient:    options.HTTPClient,
		HTTPTransport: options.HTTPTransport,
		HTTPProxy:     options.HTTPProxy,
		HTTPSProxy:    options.HTTPSProxy,
		CaCerts:       options.CaCerts,
	}
	a.transport = httpinternal.NewAsyncTransport(transportOptions)
	if options.Dsn != "" {
		dsn, err := protocol.NewDsn(options.Dsn)
		if err != nil {
			debuglog.Printf("Failed to parse DSN in adapter: %v\n", err)
		} else {
			a.dsn = dsn
		}
	}
}
// SendEvent converts event into an envelope (header, event item, then one item
// per attachment) and hands it to the underlying async transport. Conversion
// or send errors are logged and the event is dropped.
func (a *internalAsyncTransportAdapter) SendEvent(event *Event) {
	header := &protocol.EnvelopeHeader{EventID: string(event.EventID), SentAt: time.Now(), Sdk: &protocol.SdkInfo{Name: event.Sdk.Name, Version: event.Sdk.Version}}
	if a.dsn != nil {
		header.Dsn = a.dsn.String()
	}
	// Ensure the envelope always carries an event ID.
	if header.EventID == "" {
		header.EventID = protocol.GenerateEventID()
	}
	envelope := protocol.NewEnvelope(header)
	item, err := event.ToEnvelopeItem()
	if err != nil {
		debuglog.Printf("Failed to convert event to envelope item: %v", err)
		return
	}
	envelope.AddItem(item)
	// Each attachment becomes its own envelope item after the event item.
	for _, attachment := range event.Attachments {
		attachmentItem := protocol.NewAttachmentItem(attachment.Filename, attachment.ContentType, attachment.Payload)
		envelope.AddItem(attachmentItem)
	}
	if err := a.transport.SendEnvelope(envelope); err != nil {
		debuglog.Printf("Error sending envelope: %v", err)
	}
}
func (a *internalAsyncTransportAdapter) Flush(timeout time.Duration) bool {
return a.transport.Flush(timeout)
}
func (a *internalAsyncTransportAdapter) FlushWithContext(ctx context.Context) bool {
return a.transport.FlushWithContext(ctx)
}
func (a *internalAsyncTransportAdapter) Close() {
a.transport.Close()
}

View File

@ -1,26 +1,20 @@
package sentry
import (
"crypto/rand"
"encoding/hex"
"encoding/json"
"fmt"
"os"
"runtime/debug"
"strings"
"time"
"github.com/getsentry/sentry-go/internal/debuglog"
"github.com/getsentry/sentry-go/internal/protocol"
exec "golang.org/x/sys/execabs"
)
func uuid() string {
id := make([]byte, 16)
// Prefer rand.Read over rand.Reader, see https://go-review.googlesource.com/c/go/+/272326/.
_, _ = rand.Read(id)
id[6] &= 0x0F // clear version
id[6] |= 0x40 // set version to 4 (random uuid)
id[8] &= 0x3F // clear variant
id[8] |= 0x80 // set to IETF variant
return hex.EncodeToString(id)
return protocol.GenerateEventID()
}
func fileExists(fileName string) bool {
@ -35,7 +29,7 @@ func monotonicTimeSince(start time.Time) (end time.Time) {
return start.Add(time.Since(start))
}
// nolint: deadcode, unused
// nolint: unused
func prettyPrint(data interface{}) {
dbg, _ := json.MarshalIndent(data, "", " ")
fmt.Println(string(dbg))
@ -61,31 +55,78 @@ func defaultRelease() (release string) {
}
for _, e := range envs {
if release = os.Getenv(e); release != "" {
Logger.Printf("Using release from environment variable %s: %s", e, release)
debuglog.Printf("Using release from environment variable %s: %s", e, release)
return release
}
}
if info, ok := debug.ReadBuildInfo(); ok {
buildInfoVcsRevision := revisionFromBuildInfo(info)
if len(buildInfoVcsRevision) > 0 {
return buildInfoVcsRevision
}
}
// Derive a version string from Git. Example outputs:
// v1.0.1-0-g9de4
// v2.0-8-g77df-dirty
// 4f72d7
cmd := exec.Command("git", "describe", "--long", "--always", "--dirty")
b, err := cmd.Output()
if err != nil {
// Either Git is not available or the current directory is not a
// Git repository.
var s strings.Builder
fmt.Fprintf(&s, "Release detection failed: %v", err)
if err, ok := err.(*exec.ExitError); ok && len(err.Stderr) > 0 {
fmt.Fprintf(&s, ": %s", err.Stderr)
if _, err := exec.LookPath("git"); err == nil {
cmd := exec.Command("git", "describe", "--long", "--always", "--dirty")
b, err := cmd.Output()
if err != nil {
// Either Git is not available or the current directory is not a
// Git repository.
var s strings.Builder
fmt.Fprintf(&s, "Release detection failed: %v", err)
if err, ok := err.(*exec.ExitError); ok && len(err.Stderr) > 0 {
fmt.Fprintf(&s, ": %s", err.Stderr)
}
debuglog.Print(s.String())
} else {
release = strings.TrimSpace(string(b))
debuglog.Printf("Using release from Git: %s", release)
return release
}
Logger.Print(s.String())
Logger.Print("Some Sentry features will not be available. See https://docs.sentry.io/product/releases/.")
Logger.Print("To stop seeing this message, pass a Release to sentry.Init or set the SENTRY_RELEASE environment variable.")
return ""
}
release = strings.TrimSpace(string(b))
Logger.Printf("Using release from Git: %s", release)
return release
debuglog.Print("Some Sentry features will not be available. See https://docs.sentry.io/product/releases/.")
debuglog.Print("To stop seeing this message, pass a Release to sentry.Init or set the SENTRY_RELEASE environment variable.")
return ""
}
// revisionFromBuildInfo returns the VCS revision embedded in the build info,
// or the empty string when no non-empty "vcs.revision" setting is present.
func revisionFromBuildInfo(info *debug.BuildInfo) string {
	for _, s := range info.Settings {
		if s.Key != "vcs.revision" || s.Value == "" {
			continue
		}
		debuglog.Printf("Using release from debug info: %s", s.Value)
		return s.Value
	}
	return ""
}
// Pointer returns a pointer to a copy of the given value of any type.
func Pointer[T any](v T) *T {
	return &v
}
// eventIdentifier returns a human-readable identifier for the event to be used in log messages.
// Format: "<description> [<event-id>]".
func eventIdentifier(event *Event) string {
	// Default description for unrecognized event types.
	desc := fmt.Sprintf("%s event", event.Type)
	switch event.Type {
	case errorType:
		desc = "error"
	case transactionType:
		desc = "transaction"
	case checkInType:
		desc = "check-in"
	case logEvent.Type:
		desc = fmt.Sprintf("%d log events", len(event.Logs))
	case traceMetricEvent.Type:
		desc = fmt.Sprintf("%d metric events", len(event.Metrics))
	}
	return fmt.Sprintf("%s [%s]", desc, event.EventID)
}

View File

@ -1,101 +0,0 @@
## Changed
- Defined a custom error, ErrUnexpectedSignatureAlgorithm, returned when a JWS
header contains an unsupported signature algorithm.
# v4.0.4
## Fixed
- Reverted "Allow unmarshalling JSONWebKeySets with unsupported key types" as a
breaking change. See #136 / #137.
# v4.0.3
## Changed
- Allow unmarshalling JSONWebKeySets with unsupported key types (#130)
- Document that OpaqueKeyEncrypter can't be implemented (for now) (#129)
- Dependency updates
# v4.0.2
## Changed
- Improved documentation of Verify() to note that JSONWebKeySet is a supported
argument type (#104)
- Defined exported error values for missing x5c header and unsupported elliptic
curves error cases (#117)
# v4.0.1
## Fixed
- An attacker could send a JWE containing compressed data that used large
amounts of memory and CPU when decompressed by `Decrypt` or `DecryptMulti`.
Those functions now return an error if the decompressed data would exceed
250kB or 10x the compressed size (whichever is larger). Thanks to
Enze Wang@Alioth and Jianjun Chen@Zhongguancun Lab (@zer0yu and @chenjj)
for reporting.
# v4.0.0
This release makes some breaking changes in order to more thoroughly
address the vulnerabilities discussed in [Three New Attacks Against JSON Web
Tokens][1], "Sign/encrypt confusion", "Billion hash attack", and "Polyglot
token".
## Changed
- Limit JWT encryption types (exclude password or public key types) (#78)
- Enforce minimum length for HMAC keys (#85)
- jwt: match any audience in a list, rather than requiring all audiences (#81)
- jwt: accept only Compact Serialization (#75)
- jws: Add expected algorithms for signatures (#74)
- Require specifying expected algorithms for ParseEncrypted,
ParseSigned, ParseDetached, jwt.ParseEncrypted, jwt.ParseSigned,
jwt.ParseSignedAndEncrypted (#69, #74)
- Usually there is a small, known set of appropriate algorithms for a program
to use and it's a mistake to allow unexpected algorithms. For instance the
"billion hash attack" relies in part on programs accepting the PBES2
encryption algorithm and doing the necessary work even if they weren't
specifically configured to allow PBES2.
- Revert "Strip padding off base64 strings" (#82)
- The specs require base64url encoding without padding.
- Minimum supported Go version is now 1.21
## Added
- ParseSignedCompact, ParseSignedJSON, ParseEncryptedCompact, ParseEncryptedJSON.
- These allow parsing a specific serialization, as opposed to ParseSigned and
ParseEncrypted, which try to automatically detect which serialization was
provided. It's common to require a specific serialization for a specific
protocol - for instance JWT requires Compact serialization.
[1]: https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf
# v3.0.2
## Fixed
- DecryptMulti: handle decompression error (#19)
## Changed
- jwe/CompactSerialize: improve performance (#67)
- Increase the default number of PBKDF2 iterations to 600k (#48)
- Return the proper algorithm for ECDSA keys (#45)
## Added
- Add Thumbprint support for opaque signers (#38)
# v3.0.1
## Fixed
- Security issue: an attacker specifying a large "p2c" value can cause
JSONWebEncryption.Decrypt and JSONWebEncryption.DecryptMulti to consume large
amounts of CPU, causing a DoS. Thanks to Matt Schwager (@mschwager) for the
disclosure and to Tom Tervoort for originally publishing the category of attack.
https://i.blackhat.com/BH-US-23/Presentations/US-23-Tervoort-Three-New-Attacks-Against-JSON-Web-Tokens.pdf

View File

@ -3,7 +3,6 @@
[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4)
[![godoc](https://pkg.go.dev/badge/github.com/go-jose/go-jose/v4/jwt.svg)](https://pkg.go.dev/github.com/go-jose/go-jose/v4/jwt)
[![license](https://img.shields.io/badge/license-apache_2.0-blue.svg?style=flat)](https://raw.githubusercontent.com/go-jose/go-jose/master/LICENSE)
[![test](https://img.shields.io/github/checks-status/go-jose/go-jose/v4)](https://github.com/go-jose/go-jose/actions)
Package jose aims to provide an implementation of the Javascript Object Signing
and Encryption set of standards. This includes support for JSON Web Encryption,
@ -29,17 +28,20 @@ libraries in other languages.
### Versions
[Version 4](https://github.com/go-jose/go-jose)
([branch](https://github.com/go-jose/go-jose/tree/main),
[doc](https://pkg.go.dev/github.com/go-jose/go-jose/v4), [releases](https://github.com/go-jose/go-jose/releases)) is the current stable version:
The forthcoming Version 5 will be released with several breaking API changes,
and will require Go's `encoding/json/v2`, which currently requires
Go 1.25 built with GOEXPERIMENT=jsonv2.
Version 4 is the current stable version:
import "github.com/go-jose/go-jose/v4"
The old [square/go-jose](https://github.com/square/go-jose) repo contains the prior v1 and v2 versions, which
are still useable but not actively developed anymore.
It supports at least the current and previous Golang release. Currently it
requires Golang 1.24.
Version 3, in this repo, is still receiving security fixes but not functionality
updates.
Version 3 is only receiving critical security updates. Migration to Version 4 is recommended.
Versions 1 and 2 are obsolete, but can be found in the old repository, [square/go-jose](https://github.com/square/go-jose).
### Supported algorithms
@ -47,36 +49,36 @@ See below for a table of supported algorithms. Algorithm identifiers match
the names in the [JSON Web Algorithms](https://dx.doi.org/10.17487/RFC7518)
standard where possible. The Godoc reference has a list of constants.
Key encryption | Algorithm identifier(s)
:------------------------- | :------------------------------
RSA-PKCS#1v1.5 | RSA1_5
RSA-OAEP | RSA-OAEP, RSA-OAEP-256
AES key wrap | A128KW, A192KW, A256KW
AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW
ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW
ECDH-ES (direct) | ECDH-ES<sup>1</sup>
Direct encryption | dir<sup>1</sup>
| Key encryption | Algorithm identifier(s) |
|:-----------------------|:-----------------------------------------------|
| RSA-PKCS#1v1.5 | RSA1_5 |
| RSA-OAEP | RSA-OAEP, RSA-OAEP-256 |
| AES key wrap | A128KW, A192KW, A256KW |
| AES-GCM key wrap | A128GCMKW, A192GCMKW, A256GCMKW |
| ECDH-ES + AES key wrap | ECDH-ES+A128KW, ECDH-ES+A192KW, ECDH-ES+A256KW |
| ECDH-ES (direct) | ECDH-ES<sup>1</sup> |
| Direct encryption | dir<sup>1</sup> |
<sup>1. Not supported in multi-recipient mode</sup>
Signing / MAC | Algorithm identifier(s)
:------------------------- | :------------------------------
RSASSA-PKCS#1v1.5 | RS256, RS384, RS512
RSASSA-PSS | PS256, PS384, PS512
HMAC | HS256, HS384, HS512
ECDSA | ES256, ES384, ES512
Ed25519 | EdDSA<sup>2</sup>
| Signing / MAC | Algorithm identifier(s) |
|:------------------|:------------------------|
| RSASSA-PKCS#1v1.5 | RS256, RS384, RS512 |
| RSASSA-PSS | PS256, PS384, PS512 |
| HMAC | HS256, HS384, HS512 |
| ECDSA | ES256, ES384, ES512 |
| Ed25519 | EdDSA<sup>2</sup> |
<sup>2. Only available in version 2 of the package</sup>
Content encryption | Algorithm identifier(s)
:------------------------- | :------------------------------
AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512
AES-GCM | A128GCM, A192GCM, A256GCM
| Content encryption | Algorithm identifier(s) |
|:-------------------|:--------------------------------------------|
| AES-CBC+HMAC | A128CBC-HS256, A192CBC-HS384, A256CBC-HS512 |
| AES-GCM | A128GCM, A192GCM, A256GCM |
Compression | Algorithm identifiers(s)
:------------------------- | -------------------------------
DEFLATE (RFC 1951) | DEF
| Compression | Algorithm identifiers(s) |
|:-------------------|--------------------------|
| DEFLATE (RFC 1951) | DEF |
### Supported key types
@ -85,12 +87,12 @@ library, and can be passed to corresponding functions such as `NewEncrypter` or
`NewSigner`. Each of these keys can also be wrapped in a JWK if desired, which
allows attaching a key id.
Algorithm(s) | Corresponding types
:------------------------- | -------------------------------
RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey)
ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey)
EdDSA<sup>1</sup> | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey)
AES, HMAC | []byte
| Algorithm(s) | Corresponding types |
|:------------------|--------------------------------------------------------------------------------------------------------------------------------------|
| RSA | *[rsa.PublicKey](https://pkg.go.dev/crypto/rsa/#PublicKey), *[rsa.PrivateKey](https://pkg.go.dev/crypto/rsa/#PrivateKey) |
| ECDH, ECDSA | *[ecdsa.PublicKey](https://pkg.go.dev/crypto/ecdsa/#PublicKey), *[ecdsa.PrivateKey](https://pkg.go.dev/crypto/ecdsa/#PrivateKey) |
| EdDSA<sup>1</sup> | [ed25519.PublicKey](https://pkg.go.dev/crypto/ed25519#PublicKey), [ed25519.PrivateKey](https://pkg.go.dev/crypto/ed25519#PrivateKey) |
| AES, HMAC | []byte |
<sup>1. Only available in version 2 or later of the package</sup>

View File

@ -286,6 +286,10 @@ func makeJWERecipient(alg KeyAlgorithm, encryptionKey interface{}) (recipientKey
return newSymmetricRecipient(alg, encryptionKey)
case string:
return newSymmetricRecipient(alg, []byte(encryptionKey))
case JSONWebKey:
recipient, err := makeJWERecipient(alg, encryptionKey.Key)
recipient.keyID = encryptionKey.KeyID
return recipient, err
case *JSONWebKey:
recipient, err := makeJWERecipient(alg, encryptionKey.Key)
recipient.keyID = encryptionKey.KeyID
@ -450,13 +454,9 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error)
return nil, errors.New("go-jose/go-jose: too many recipients in payload; expecting only one")
}
critical, err := headers.getCritical()
err := headers.checkNoCritical()
if err != nil {
return nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
}
if len(critical) > 0 {
return nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
return nil, err
}
key, err := tryJWKS(decryptionKey, obj.Header)
@ -523,13 +523,9 @@ func (obj JSONWebEncryption) Decrypt(decryptionKey interface{}) ([]byte, error)
func (obj JSONWebEncryption) DecryptMulti(decryptionKey interface{}) (int, Header, []byte, error) {
globalHeaders := obj.mergedHeaders(nil)
critical, err := globalHeaders.getCritical()
err := globalHeaders.checkNoCritical()
if err != nil {
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: invalid crit header")
}
if len(critical) > 0 {
return -1, Header{}, nil, fmt.Errorf("go-jose/go-jose: unsupported crit header")
return -1, Header{}, nil, err
}
key, err := tryJWKS(decryptionKey, obj.Header)

View File

@ -274,7 +274,7 @@ func validateAlgEnc(headers rawHeader, keyAlgorithms []KeyAlgorithm, contentEncr
if alg != "" && !containsKeyAlgorithm(keyAlgorithms, alg) {
return fmt.Errorf("unexpected key algorithm %q; expected %q", alg, keyAlgorithms)
}
if alg != "" && !containsContentEncryption(contentEncryption, enc) {
if enc != "" && !containsContentEncryption(contentEncryption, enc) {
return fmt.Errorf("unexpected content encryption algorithm %q; expected %q", enc, contentEncryption)
}
return nil
@ -288,11 +288,20 @@ func ParseEncryptedCompact(
keyAlgorithms []KeyAlgorithm,
contentEncryption []ContentEncryption,
) (*JSONWebEncryption, error) {
// Five parts is four separators
if strings.Count(input, ".") != 4 {
return nil, fmt.Errorf("go-jose/go-jose: compact JWE format must have five parts")
var parts [5]string
var ok bool
for i := range 4 {
parts[i], input, ok = strings.Cut(input, ".")
if !ok {
return nil, errors.New("go-jose/go-jose: compact JWE format must have five parts")
}
}
parts := strings.SplitN(input, ".", 5)
// Validate that the last part does not contain more dots
if strings.ContainsRune(input, '.') {
return nil, errors.New("go-jose/go-jose: compact JWE format must have five parts")
}
parts[4] = input
rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
if err != nil {

View File

@ -175,6 +175,8 @@ func (k JSONWebKey) MarshalJSON() ([]byte, error) {
}
// UnmarshalJSON reads a key from its JSON representation.
//
// Returns ErrUnsupportedKeyType for unrecognized or unsupported "kty" header values.
func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
var raw rawJSONWebKey
err = json.Unmarshal(data, &raw)
@ -228,7 +230,7 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
}
key, err = raw.symmetricKey()
case "OKP":
if raw.Crv == "Ed25519" && raw.X != nil {
if raw.Crv == "Ed25519" {
if raw.D != nil {
key, err = raw.edPrivateKey()
if err == nil {
@ -238,17 +240,27 @@ func (k *JSONWebKey) UnmarshalJSON(data []byte) (err error) {
key, err = raw.edPublicKey()
keyPub = key
}
} else {
return fmt.Errorf("go-jose/go-jose: unknown curve %s'", raw.Crv)
}
default:
return fmt.Errorf("go-jose/go-jose: unknown json web key type '%s'", raw.Kty)
case "":
// kty MUST be present
err = fmt.Errorf("go-jose/go-jose: missing json web key type")
}
if err != nil {
return
}
if key == nil {
// RFC 7517:
// 5. JWK Set Format
// ...
// Implementations SHOULD ignore JWKs within a JWK Set that use "kty"
// (key type) values that are not understood by them, that are missing
// required members, or for which values are out of the supported
// ranges.
return ErrUnsupportedKeyType
}
if certPub != nil && keyPub != nil {
if !reflect.DeepEqual(certPub, keyPub) {
return errors.New("go-jose/go-jose: invalid JWK, public keys in key and x5c fields do not match")
@ -581,10 +593,10 @@ func fromEcPublicKey(pub *ecdsa.PublicKey) (*rawJSONWebKey, error) {
func (key rawJSONWebKey) edPrivateKey() (ed25519.PrivateKey, error) {
var missing []string
switch {
case key.D == nil:
if key.D == nil {
missing = append(missing, "D")
case key.X == nil:
}
if key.X == nil {
missing = append(missing, "X")
}
@ -611,19 +623,21 @@ func (key rawJSONWebKey) edPublicKey() (ed25519.PublicKey, error) {
func (key rawJSONWebKey) rsaPrivateKey() (*rsa.PrivateKey, error) {
var missing []string
switch {
case key.N == nil:
if key.N == nil {
missing = append(missing, "N")
case key.E == nil:
}
if key.E == nil {
missing = append(missing, "E")
case key.D == nil:
}
if key.D == nil {
missing = append(missing, "D")
case key.P == nil:
}
if key.P == nil {
missing = append(missing, "P")
case key.Q == nil:
}
if key.Q == nil {
missing = append(missing, "Q")
}
if len(missing) > 0 {
return nil, fmt.Errorf("go-jose/go-jose: invalid RSA private key, missing %s value(s)", strings.Join(missing, ", "))
}
@ -698,8 +712,19 @@ func (key rawJSONWebKey) ecPrivateKey() (*ecdsa.PrivateKey, error) {
return nil, fmt.Errorf("go-jose/go-jose: unsupported elliptic curve '%s'", key.Crv)
}
if key.X == nil || key.Y == nil || key.D == nil {
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, missing x/y/d values")
var missing []string
if key.X == nil {
missing = append(missing, "X")
}
if key.Y == nil {
missing = append(missing, "Y")
}
if key.D == nil {
missing = append(missing, "D")
}
if len(missing) > 0 {
return nil, fmt.Errorf("go-jose/go-jose: invalid EC private key, missing %s value(s)", strings.Join(missing, ", "))
}
// The length of this octet string MUST be the full size of a coordinate for

View File

@ -361,35 +361,43 @@ func (parsed *rawJSONWebSignature) sanitized(signatureAlgorithms []SignatureAlgo
return obj, nil
}
const tokenDelim = "."
// parseSignedCompact parses a message in compact format.
func parseSignedCompact(
input string,
payload []byte,
signatureAlgorithms []SignatureAlgorithm,
) (*JSONWebSignature, error) {
// Three parts is two separators
if strings.Count(input, ".") != 2 {
protected, s, ok := strings.Cut(input, tokenDelim)
if !ok { // no period found
return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
}
claims, sig, ok := strings.Cut(s, tokenDelim)
if !ok { // only one period found
return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
}
if strings.ContainsRune(sig, '.') { // too many periods found
return nil, fmt.Errorf("go-jose/go-jose: compact JWS format must have three parts")
}
parts := strings.SplitN(input, ".", 3)
if parts[1] != "" && payload != nil {
if claims != "" && payload != nil {
return nil, fmt.Errorf("go-jose/go-jose: payload is not detached")
}
rawProtected, err := base64.RawURLEncoding.DecodeString(parts[0])
rawProtected, err := base64.RawURLEncoding.DecodeString(protected)
if err != nil {
return nil, err
}
if payload == nil {
payload, err = base64.RawURLEncoding.DecodeString(parts[1])
payload, err = base64.RawURLEncoding.DecodeString(claims)
if err != nil {
return nil, err
}
}
signature, err := base64.RawURLEncoding.DecodeString(parts[2])
signature, err := base64.RawURLEncoding.DecodeString(sig)
if err != nil {
return nil, err
}

View File

@ -77,6 +77,9 @@ var (
// ErrUnsupportedEllipticCurve indicates unsupported or unknown elliptic curve has been found.
ErrUnsupportedEllipticCurve = errors.New("go-jose/go-jose: unsupported/unknown elliptic curve")
// ErrUnsupportedCriticalHeader is returned when a header is marked critical but not supported by go-jose.
ErrUnsupportedCriticalHeader = errors.New("go-jose/go-jose: unsupported critical header")
)
// Key management algorithms
@ -167,8 +170,8 @@ const (
)
// supportedCritical is the set of supported extensions that are understood and processed.
var supportedCritical = map[string]bool{
headerB64: true,
var supportedCritical = map[string]struct{}{
headerB64: {},
}
// rawHeader represents the JOSE header for JWE/JWS objects (used for parsing).
@ -346,6 +349,32 @@ func (parsed rawHeader) getCritical() ([]string, error) {
return q, nil
}
// checkNoCritical verifies there are no critical headers present.
func (parsed rawHeader) checkNoCritical() error {
if _, ok := parsed[headerCritical]; ok {
return ErrUnsupportedCriticalHeader
}
return nil
}
// checkSupportedCritical verifies there are no unsupported critical headers.
// Supported headers are passed in as a set: map of names to empty structs
func (parsed rawHeader) checkSupportedCritical(supported map[string]struct{}) error {
crit, err := parsed.getCritical()
if err != nil {
return err
}
for _, name := range crit {
if _, ok := supported[name]; !ok {
return ErrUnsupportedCriticalHeader
}
}
return nil
}
// getS2C extracts parsed "p2c" from the raw JSON.
func (parsed rawHeader) getP2C() (int, error) {
v := parsed[headerP2C]

View File

@ -404,15 +404,23 @@ func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey inter
}
signature := obj.Signatures[0]
headers := signature.mergedHeaders()
critical, err := headers.getCritical()
if err != nil {
return err
if signature.header != nil {
// Per https://www.rfc-editor.org/rfc/rfc7515.html#section-4.1.11,
// 4.1.11. "crit" (Critical) Header Parameter
// "When used, this Header Parameter MUST be integrity
// protected; therefore, it MUST occur only within the JWS
// Protected Header."
err = signature.header.checkNoCritical()
if err != nil {
return err
}
}
for _, name := range critical {
if !supportedCritical[name] {
return ErrCryptoFailure
if signature.protected != nil {
err = signature.protected.checkSupportedCritical(supportedCritical)
if err != nil {
return err
}
}
@ -421,6 +429,7 @@ func (obj JSONWebSignature) DetachedVerify(payload []byte, verificationKey inter
return ErrCryptoFailure
}
headers := signature.mergedHeaders()
alg := headers.getSignatureAlgorithm()
err = verifier.verifyPayload(input, signature.Signature, alg)
if err == nil {
@ -469,14 +478,22 @@ func (obj JSONWebSignature) DetachedVerifyMulti(payload []byte, verificationKey
outer:
for i, signature := range obj.Signatures {
headers := signature.mergedHeaders()
critical, err := headers.getCritical()
if err != nil {
continue
if signature.header != nil {
// Per https://www.rfc-editor.org/rfc/rfc7515.html#section-4.1.11,
// 4.1.11. "crit" (Critical) Header Parameter
// "When used, this Header Parameter MUST be integrity
// protected; therefore, it MUST occur only within the JWS
// Protected Header."
err = signature.header.checkNoCritical()
if err != nil {
continue outer
}
}
for _, name := range critical {
if !supportedCritical[name] {
if signature.protected != nil {
// Check for only supported critical headers
err = signature.protected.checkSupportedCritical(supportedCritical)
if err != nil {
continue outer
}
}
@ -486,6 +503,7 @@ outer:
continue
}
headers := signature.mergedHeaders()
alg := headers.getSignatureAlgorithm()
err = verifier.verifyPayload(input, signature.Signature, alg)
if err == nil {

18
vendor/modules.txt vendored
View File

@ -10,8 +10,8 @@ github.com/beorn7/perks/quantile
# github.com/cespare/xxhash/v2 v2.3.0
## explicit; go 1.11
github.com/cespare/xxhash/v2
# github.com/coreos/go-oidc/v3 v3.10.0
## explicit; go 1.21
# github.com/coreos/go-oidc/v3 v3.17.0
## explicit; go 1.24.0
github.com/coreos/go-oidc/v3/oidc
# github.com/coreos/go-systemd/v22 v22.5.0
## explicit; go 1.12
@ -39,13 +39,19 @@ github.com/fortytw2/leaktest
# github.com/fsnotify/fsnotify v1.4.9
## explicit; go 1.13
github.com/fsnotify/fsnotify
# github.com/getsentry/sentry-go v0.16.0
## explicit; go 1.19
# github.com/getsentry/sentry-go v0.43.0
## explicit; go 1.24.0
github.com/getsentry/sentry-go
github.com/getsentry/sentry-go/attribute
github.com/getsentry/sentry-go/internal/debug
github.com/getsentry/sentry-go/internal/debuglog
github.com/getsentry/sentry-go/internal/http
github.com/getsentry/sentry-go/internal/otel/baggage
github.com/getsentry/sentry-go/internal/otel/baggage/internal/baggage
github.com/getsentry/sentry-go/internal/protocol
github.com/getsentry/sentry-go/internal/ratelimit
github.com/getsentry/sentry-go/internal/telemetry
github.com/getsentry/sentry-go/internal/util
# github.com/gin-gonic/gin v1.9.1
## explicit; go 1.20
# github.com/go-chi/chi/v5 v5.2.2
@ -54,8 +60,8 @@ github.com/go-chi/chi/v5
# github.com/go-chi/cors v1.2.1
## explicit; go 1.14
github.com/go-chi/cors
# github.com/go-jose/go-jose/v4 v4.1.0
## explicit; go 1.24
# github.com/go-jose/go-jose/v4 v4.1.3
## explicit; go 1.24.0
github.com/go-jose/go-jose/v4
github.com/go-jose/go-jose/v4/cipher
github.com/go-jose/go-jose/v4/json