TUN-7227: Migrate to devincarr/quic-go
The lucas-clemente/quic-go package moved to a new namespace and our branch of it went stale. This new fork tracks the quic-go/quic-go repository and applies the max datagram frame size change. Until max datagram frame size support is upstreamed into quic-go, this fork unblocks Go 1.20 support, since the old lucas-clemente/quic-go will not receive Go 1.20 support.
parent ff9621bbd5
commit 9426b60308

Changed files:
  connection/
  datagramsession/
  go.mod
  go.sum
  quic/: conversion.go, datagram.go, datagram_test.go, datagramv2.go, metrics.go, param_windows.go, safe_stream.go, safe_stream_test.go, tracing.go
  supervisor/
  vendor/github.com/cheekybits/genny/
  vendor/github.com/golang/mock/
  vendor/github.com/google/pprof/
  vendor/github.com/lucas-clemente/quic-go/: README.md, client.go, closed_conn.go, datagram_queue.go,
    internal/ (ackhandler, handshake, protocol, qtls, utils: atomic_bool.go, gen.go, minmax.go, new_connection_id.go,
    newconnectionid_linkedlist.go, packet_interval.go, packetinterval_linkedlist.go, streamframe_interval.go; wire),
    logging/, mockgen.go, mockgen_private.sh, multiplexer.go, packet_handler_map.go, packet_packer.go, server.go,
    streams_map_generic_helper.go, streams_map_incoming_bidi.go, streams_map_incoming_generic.go,
    streams_map_outgoing_bidi.go, streams_map_outgoing_generic.go, tools.go
  vendor/github.com/marten-seemann/qtls-go1-16/
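Before the file-by-file diff, a minimal, self-contained sketch (not cloudflared code; the address, ALPN, and timeouts are placeholders) of what the migrated call sites boil down to: import the new module path, enable datagrams, and use the context-first quic.Dial signature shown in the hunks below.

package main

import (
	"context"
	"crypto/tls"
	"log"
	"net"
	"time"

	// New import path; go.mod replaces it with the devincarr fork until the
	// max datagram frame size change lands upstream.
	"github.com/quic-go/quic-go"
)

func main() {
	udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		log.Fatal(err)
	}
	defer udpConn.Close()

	// Placeholder edge address and TLS config.
	edgeAddr := &net.UDPAddr{IP: net.ParseIP("198.51.100.1"), Port: 7844}
	tlsConf := &tls.Config{ServerName: "example.com", NextProtos: []string{"example-proto"}}
	quicConf := &quic.Config{
		EnableDatagrams: true,
		KeepAlivePeriod: 5 * time.Second,
	}

	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// quic.Dial now takes a context as its first argument and no longer a
	// separate host string (see the connection hunk below).
	conn, err := quic.Dial(ctx, udpConn, edgeAddr, tlsConf, quicConf)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.CloseWithError(0, "done")
}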
@@ -16,8 +16,8 @@ import (
 	"time"

 	"github.com/google/uuid"
-	"github.com/lucas-clemente/quic-go"
 	"github.com/pkg/errors"
+	"github.com/quic-go/quic-go"
 	"github.com/rs/zerolog"
 	"go.opentelemetry.io/otel/attribute"
 	"go.opentelemetry.io/otel/trace"

@@ -67,6 +67,7 @@ type QUICConnection struct {

 // NewQUICConnection returns a new instance of QUICConnection.
 func NewQUICConnection(
+	ctx context.Context,
 	quicConfig *quic.Config,
 	edgeAddr net.Addr,
 	localAddr net.IP,

@@ -83,7 +84,7 @@ func NewQUICConnection(
 		return nil, err
 	}

-	session, err := quic.Dial(udpConn, edgeAddr, edgeAddr.String(), tlsConfig, quicConfig)
+	session, err := quic.Dial(ctx, udpConn, edgeAddr, tlsConfig, quicConfig)
 	if err != nil {
 		// close the udp server socket in case of error connecting to the edge
 		udpConn.Close()
@@ -16,8 +16,8 @@ import (

 	"github.com/gobwas/ws/wsutil"
 	"github.com/google/uuid"
-	"github.com/lucas-clemente/quic-go"
 	"github.com/pkg/errors"
+	"github.com/quic-go/quic-go"
 	"github.com/rs/zerolog"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"

@@ -32,7 +32,6 @@ import (
 var (
 	testTLSServerConfig = quicpogs.GenerateTLSConfig()
 	testQUICConfig      = &quic.Config{
-		ConnectionIDLength: 16,
 		KeepAlivePeriod:    5 * time.Second,
 		EnableDatagrams:    true,
 	}

@@ -43,13 +42,6 @@ var _ ReadWriteAcker = (*streamReadWriteAcker)(nil)
 // TestQUICServer tests if a quic server accepts and responds to a quic client with the acceptance protocol.
 // It also serves as a demonstration for communication with the QUIC connection started by a cloudflared.
 func TestQUICServer(t *testing.T) {
-	// Start a UDP Listener for QUIC.
-	udpAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
-	require.NoError(t, err)
-	udpListener, err := net.ListenUDP(udpAddr.Network(), udpAddr)
-	require.NoError(t, err)
-	defer udpListener.Close()
-
 	// This is simply a sample websocket frame message.
 	wsBuf := &bytes.Buffer{}
 	wsutil.WriteClientBinary(wsBuf, []byte("Hello"))

@@ -145,8 +137,14 @@ func TestQUICServer(t *testing.T) {
 		test := test // capture range variable
 		t.Run(test.desc, func(t *testing.T) {
 			ctx, cancel := context.WithCancel(context.Background())

-			quicListener, err := quic.Listen(udpListener, testTLSServerConfig, testQUICConfig)
+			// Start a UDP Listener for QUIC.
+			udpAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
+			require.NoError(t, err)
+			udpListener, err := net.ListenUDP(udpAddr.Network(), udpAddr)
+			require.NoError(t, err)
+			defer udpListener.Close()
+			quicTransport := &quic.Transport{Conn: udpListener, ConnectionIDLength: 16}
+			quicListener, err := quicTransport.Listen(testTLSServerConfig, testQUICConfig)
 			require.NoError(t, err)

 			serverDone := make(chan struct{})

@@ -187,7 +185,7 @@ func (fakeControlStream) IsStopped() bool {
 func quicServer(
 	ctx context.Context,
 	t *testing.T,
-	listener quic.Listener,
+	listener *quic.Listener,
 	dest string,
 	connectionType quicpogs.ConnectionType,
 	metadata []quicpogs.Metadata,

@@ -713,7 +711,10 @@ func testQUICConnection(udpListenerAddr net.Addr, t *testing.T, index uint8) *QU
 	}
 	// Start a mock httpProxy
 	log := zerolog.New(os.Stdout)
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
 	qc, err := NewQUICConnection(
+		ctx,
 		testQUICConfig,
 		udpListenerAddr,
 		nil,
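The other notable API shift in this file: ConnectionIDLength is no longer a quic.Config field, so the test now sets it on a quic.Transport and listens through that. A minimal sketch of the same pattern, assuming the caller supplies a TLS config carrying a server certificate (names here are illustrative, not cloudflared's):

package quicsketch

import (
	"crypto/tls"
	"net"

	"github.com/quic-go/quic-go"
)

// listen mirrors the updated test: the UDP socket and connection ID length
// live on the Transport, while the remaining settings stay on quic.Config.
func listen(tlsConf *tls.Config) (*quic.Listener, error) {
	udpAddr, err := net.ResolveUDPAddr("udp", "127.0.0.1:0")
	if err != nil {
		return nil, err
	}
	udpConn, err := net.ListenUDP(udpAddr.Network(), udpAddr)
	if err != nil {
		return nil, err
	}
	tr := &quic.Transport{Conn: udpConn, ConnectionIDLength: 16}
	ln, err := tr.Listen(tlsConf, &quic.Config{EnableDatagrams: true})
	if err != nil {
		udpConn.Close() // close the socket if the listener could not be created
		return nil, err
	}
	return ln, nil
}

Returning *quic.Listener matches the quicServer signature change above, where the listener is now passed by pointer.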
@@ -51,7 +51,7 @@ type Session struct {

 func (s *Session) Serve(ctx context.Context, closeAfterIdle time.Duration) (closedByRemote bool, err error) {
 	go func() {
-		// QUIC implementation copies data to another buffer before returning https://github.com/lucas-clemente/quic-go/blob/v0.24.0/session.go#L1967-L1975
+		// QUIC implementation copies data to another buffer before returning https://github.com/quic-go/quic-go/blob/v0.24.0/session.go#L1967-L1975
 		// This makes it safe to share readBuffer between iterations
 		const maxPacketSize = 1500
 		readBuffer := make([]byte, maxPacketSize)
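Only the comment's upstream URL changes here, but the invariant it records is load-bearing: readBuffer can be reused across iterations only because quic-go copies datagram payloads into its own buffer before returning them. A simplified sketch of that loop shape, with readFrom as a hypothetical stand-in for the session's underlying QUIC read (not the real cloudflared method):

package datagramsketch

import "context"

// readLoop drains packets with one shared buffer. Reuse is safe only because
// the producer (quic-go here) copies data out before returning; otherwise each
// iteration would clobber bytes still held by the previous handler call.
func readLoop(ctx context.Context, readFrom func([]byte) (int, error), handle func([]byte)) error {
	const maxPacketSize = 1500
	readBuffer := make([]byte, maxPacketSize)
	for {
		if err := ctx.Err(); err != nil {
			return err
		}
		n, err := readFrom(readBuffer)
		if err != nil {
			return err
		}
		handle(readBuffer[:n])
	}
}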
go.mod (30 lines changed)
@@ -20,13 +20,13 @@ require (
 	github.com/google/uuid v1.3.0
 	github.com/gorilla/websocket v1.4.2
 	github.com/json-iterator/go v1.1.12
-	github.com/lucas-clemente/quic-go v0.28.1
 	github.com/mattn/go-colorable v0.1.13
 	github.com/miekg/dns v1.1.50
 	github.com/mitchellh/go-homedir v1.1.0
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.13.0
 	github.com/prometheus/client_model v0.2.0
+	github.com/quic-go/quic-go v0.0.0-00010101000000-000000000000
 	github.com/rs/zerolog v1.20.0
 	github.com/stretchr/testify v1.8.1
 	github.com/urfave/cli/v2 v2.3.0

@@ -57,7 +56,6 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
-	github.com/cheekybits/genny v1.0.0 // indirect
 	github.com/cloudflare/circl v1.2.1-0.20220809205628-0a9554f37a47 // indirect
 	github.com/coredns/caddy v1.1.1 // indirect
 	github.com/cpuguy83/go-md2man/v2 v2.0.0 // indirect

@@ -72,29 +71,29 @@ require (
 	github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 // indirect
 	github.com/gobwas/httphead v0.0.0-20200921212729-da3d93bc3c58 // indirect
 	github.com/gobwas/pool v0.2.1 // indirect
 	github.com/golang/mock v1.6.0 // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect
 	github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
 	github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
 	github.com/klauspost/compress v1.15.11 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/kylelemons/godebug v1.1.0 // indirect
-	github.com/marten-seemann/qtls-go1-16 v0.1.5 // indirect
-	github.com/marten-seemann/qtls-go1-17 v0.1.2 // indirect
-	github.com/marten-seemann/qtls-go1-18 v0.1.2 // indirect
-	github.com/marten-seemann/qtls-go1-19 v0.1.0-beta.1 // indirect
 	github.com/mattn/go-isatty v0.0.16 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/nxadm/tail v1.4.8 // indirect
 	github.com/onsi/ginkgo v1.16.5 // indirect
 	github.com/onsi/ginkgo/v2 v2.4.0 // indirect
 	github.com/onsi/gomega v1.23.0 // indirect
 	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 // indirect
 	github.com/prometheus/common v0.37.0 // indirect
 	github.com/prometheus/procfs v0.8.0 // indirect
+	github.com/quic-go/qtls-go1-19 v0.3.2 // indirect
+	github.com/quic-go/qtls-go1-20 v0.2.2 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
 	golang.org/x/exp v0.0.0-20221205204356-47842c84f3db // indirect
 	golang.org/x/mod v0.8.0 // indirect
 	golang.org/x/oauth2 v0.4.0 // indirect
 	golang.org/x/text v0.9.0 // indirect

@@ -103,26 +102,21 @@ require (
 	google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd // indirect
 	google.golang.org/grpc v1.51.0 // indirect
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
 	gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
 )

 replace github.com/urfave/cli/v2 => github.com/ipostelnik/cli/v2 v2.3.1-0.20210324024421-b6ea8234fe3d

-replace github.com/lucas-clemente/quic-go => github.com/chungthuang/quic-go v0.27.1-0.20220809135021-ca330f1dec9f
-
 // Avoid 'CVE-2022-21698'
 replace github.com/prometheus/golang_client => github.com/prometheus/golang_client v1.12.1

 replace gopkg.in/yaml.v3 => gopkg.in/yaml.v3 v3.0.1

+replace github.com/quic-go/quic-go => github.com/devincarr/quic-go v0.0.0-20230502200822-d1f4edacbee7
+
 // Post-quantum tunnel RTG-1339
 replace (
-	// Branches go1.18 go1.19 go1.20 on github.com/cloudflare/qtls-pq
-	github.com/marten-seemann/qtls-go1-18 => github.com/cloudflare/qtls-pq v0.0.0-20230103171413-e7a2fb559a0e
-	github.com/marten-seemann/qtls-go1-19 => github.com/cloudflare/qtls-pq v0.0.0-20230103171656-05e84f90909e
-	github.com/marten-seemann/qtls-go1-20 => github.com/cloudflare/qtls-pq v0.0.0-20230215110727-8b4e1699c2a8
-	github.com/quic-go/qtls-go1-18 => github.com/cloudflare/qtls-pq v0.0.0-20230103171413-e7a2fb559a0e
-	github.com/quic-go/qtls-go1-19 => github.com/cloudflare/qtls-pq v0.0.0-20230103171656-05e84f90909e
-	github.com/quic-go/qtls-go1-20 => github.com/cloudflare/qtls-pq v0.0.0-20230215110727-8b4e1699c2a8
+	// Branches go1.19 go1.20 on github.com/cloudflare/qtls-pq
+	github.com/quic-go/qtls-go1-19 => github.com/cloudflare/qtls-pq v0.0.0-20230320123031-3faac1a945b2
+	github.com/quic-go/qtls-go1-20 => github.com/cloudflare/qtls-pq v0.0.0-20230320122459-4ed280d0d633
 )
149
go.sum
149
go.sum
|
@ -1,7 +1,5 @@
|
|||
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.31.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
|
||||
cloud.google.com/go v0.37.0/go.mod h1:TS1dMSSfndXH133OKGwekG838Om/cQT0BUHV3HcBgoo=
|
||||
cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU=
|
||||
cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU=
|
||||
cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY=
|
||||
|
@ -55,12 +53,7 @@ cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohl
|
|||
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
|
||||
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
|
||||
cloud.google.com/go/storage v1.22.1/go.mod h1:S8N1cAStu7BOeFfE8KAQzmyyLkK8p/vmRq6kuBTW58Y=
|
||||
dmitri.shuralyov.com/app/changes v0.0.0-20180602232624-0a106ad413e3/go.mod h1:Yl+fi1br7+Rr3LqpNJf1/uxUdtRUV+Tnj0o93V2B9MU=
|
||||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
dmitri.shuralyov.com/html/belt v0.0.0-20180602232347-f7d459c86be0/go.mod h1:JLBrvjyP0v+ecvNYvCpyZgu5/xkfAUhi6wJj28eUfSU=
|
||||
dmitri.shuralyov.com/service/change v0.0.0-20181023043359-a85b471d5412/go.mod h1:a1inKt/atXimZ4Mv927x+r7UpyzRUf4emIoiiSC2TN4=
|
||||
dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D6DFvNNtx+9ybjezNCa8XF0xaYcETyp6rHWU=
|
||||
git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/toml v1.2.0 h1:Rt8g24XnyGTyglgET/PRUNlrUeu9F5L+7FilkXfZgs0=
|
||||
github.com/BurntSushi/toml v1.2.0/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
|
||||
|
@ -71,7 +64,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy
|
|||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/apparentlymart/go-cidr v1.1.0 h1:2mAhrMoF+nhXqxTzSZMUzDHkLjmIHC+Zzn4tdgBZjnU=
|
||||
github.com/apparentlymart/go-cidr v1.1.0/go.mod h1:EBcsNrHc3zQeuaeCeCtQruQm+n9/YjEn/vI25Lg7Gwc=
|
||||
|
@ -79,8 +71,6 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24
|
|||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bradfitz/go-smtpd v0.0.0-20170404230938-deb6d6237625/go.mod h1:HYsPBTaaSFSlLx/70C2HPIMNZpVV8+vt/A+FMnYP11g=
|
||||
github.com/buger/jsonparser v0.0.0-20181115193947-bf1c66bbce23/go.mod h1:bbYlZJ7hK1yFx9hf58LP0zeX7UjIGs20ufpu3evjr+s=
|
||||
github.com/bwesterb/go-ristretto v1.2.2/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
|
||||
github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
|
@ -90,10 +80,6 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
|
|||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cheekybits/genny v1.0.0 h1:uGGa4nei+j20rOSeDeP5Of12XVm7TGUd4dJA9RDitfE=
|
||||
github.com/cheekybits/genny v1.0.0/go.mod h1:+tQajlRqAUrPI7DOSpB0XAqZYtQakVtB7wXkRAgjxjQ=
|
||||
github.com/chungthuang/quic-go v0.27.1-0.20220809135021-ca330f1dec9f h1:UWC3XjwZzocdNCzzXxq9j/1SdHMZXhcTOsh/+gNRBUQ=
|
||||
github.com/chungthuang/quic-go v0.27.1-0.20220809135021-ca330f1dec9f/go.mod h1:oGz5DKK41cJt5+773+BSO9BXDsREY4HLf7+0odGAPO0=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
|
@ -104,10 +90,10 @@ github.com/cloudflare/circl v1.2.1-0.20220809205628-0a9554f37a47 h1:YzpECHxZ9TzO
|
|||
github.com/cloudflare/circl v1.2.1-0.20220809205628-0a9554f37a47/go.mod h1:qhx8gBILsYlbam7h09SvHDSkjpe3TfLA7b/z4rxJvkE=
|
||||
github.com/cloudflare/golibs v0.0.0-20170913112048-333127dbecfc h1:Dvk3ySBsOm5EviLx6VCyILnafPcQinXGP5jbTdHUJgE=
|
||||
github.com/cloudflare/golibs v0.0.0-20170913112048-333127dbecfc/go.mod h1:HlgKKR8V5a1wroIDDIz3/A+T+9Janfq+7n1P5sEFdi0=
|
||||
github.com/cloudflare/qtls-pq v0.0.0-20230103171413-e7a2fb559a0e h1:frfo+L0qloEb6Vj+qjS4pbAYSJQZAlUnKZu0uJoErac=
|
||||
github.com/cloudflare/qtls-pq v0.0.0-20230103171413-e7a2fb559a0e/go.mod h1:mW0BgKFFDAiSmOdUwoORtjo0V2vqw5QzVYRtKQqw/Jg=
|
||||
github.com/cloudflare/qtls-pq v0.0.0-20230103171656-05e84f90909e h1:RtQDXvDi0PK3EonP0v7zkE5/rApK4MsgRATCdD+ughg=
|
||||
github.com/cloudflare/qtls-pq v0.0.0-20230103171656-05e84f90909e/go.mod h1:aIsWqC0WXyUiUxBl/RfxAjDyWE9CCLqvSMnCMTd/+bc=
|
||||
github.com/cloudflare/qtls-pq v0.0.0-20230320122459-4ed280d0d633 h1:ZTub2XMOBpxyBiJf6Q+UKqAi07yt1rZmFitriHvFd8M=
|
||||
github.com/cloudflare/qtls-pq v0.0.0-20230320122459-4ed280d0d633/go.mod h1:j/igSUc4PgBMayIsBGjAFu2i7g663rm6kZrKy4htb7E=
|
||||
github.com/cloudflare/qtls-pq v0.0.0-20230320123031-3faac1a945b2 h1:0/KuLjh9lBMiXlooAdwoo+FbLVD5DABtquB0ImEFOK0=
|
||||
github.com/cloudflare/qtls-pq v0.0.0-20230320123031-3faac1a945b2/go.mod h1:XzuZIjv4mF5cM205RHHW1d60PQtWGwMR6jx38YKuYHs=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
|
@ -123,7 +109,6 @@ github.com/coredns/coredns v1.10.0 h1:jCfuWsBjTs0dapkkhISfPCzn5LqvSRtrFtaf/Tjj4D
|
|||
github.com/coredns/coredns v1.10.0/go.mod h1:CIfRU5TgpuoIiJBJ4XrofQzfFQpPFh32ERpUevrSlaw=
|
||||
github.com/coreos/go-oidc/v3 v3.4.0 h1:xz7elHb/LDwm/ERpwHd+5nb7wFHL32rsr6bBOgaeu6g=
|
||||
github.com/coreos/go-oidc/v3 v3.4.0/go.mod h1:eHUXhZtXPQLgEaDrOVTgwbgmz1xGOkJNye6h3zkD2Pw=
|
||||
github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU=
|
||||
github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
|
@ -134,7 +119,8 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
|
|||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
|
||||
github.com/devincarr/quic-go v0.0.0-20230502200822-d1f4edacbee7 h1:qxyoKKPXmPsbvT7SZTcvhEgUaZhEttk4f6u8rIawKj0=
|
||||
github.com/devincarr/quic-go v0.0.0-20230502200822-d1f4edacbee7/go.mod h1:+4CVgVppm0FNjpG3UcX8Joi/frKOH7/ciD5yGcwOO1g=
|
||||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
|
@ -157,8 +143,6 @@ github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870 h1:E2s37DuLxFhQD
|
|||
github.com/facebookgo/subset v0.0.0-20150612182917-8dac2c3c4870/go.mod h1:5tD+neXqOorC30/tWg0LCSkrqj/AR6gu8yY8/fpw1q0=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568 h1:BHsljHzVlRcyQhjrss6TZTdY2VfCqZPbv5k3iBFa2ZQ=
|
||||
github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc=
|
||||
github.com/francoispqt/gojay v1.2.13/go.mod h1:ehT5mTG4ua4581f1++1WLG0vPdaA9HaiDsoyrBGkyDY=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs=
|
||||
|
@ -170,10 +154,8 @@ github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE
|
|||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M=
|
||||
github.com/gin-gonic/gin v1.8.1 h1:4+fr/el88TOO3ewCmQr8cx/CtZ/umlIRIs5M4NTNjf8=
|
||||
github.com/gliderlabs/ssh v0.1.1/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0=
|
||||
github.com/go-chi/chi/v5 v5.0.8 h1:lD+NLqFcAi1ovnVZpsnObHGW4xb4J8lNmoYVfECH1Y0=
|
||||
github.com/go-chi/chi/v5 v5.0.8/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8=
|
||||
github.com/go-errors/errors v1.0.1/go.mod h1:f4zRHt4oKfwPJE5k8C9vpYG+aDHdBFUsgrm6/TyX73Q=
|
||||
github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxIA=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
|
@ -222,7 +204,6 @@ github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0L
|
|||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E=
|
||||
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
|
||||
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
|
||||
|
@ -269,8 +250,6 @@ github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-github v17.0.0+incompatible/go.mod h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ=
|
||||
github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gopacket v1.1.19 h1:ves8RnFZPGiFnTS0uPQStjwru6uO6h+nlr9j6fL7kF8=
|
||||
github.com/google/gopacket v1.1.19/go.mod h1:iJ8V8n6KS+z2U1A8pUwu8bW5SyEMkXJB8Yo/Vo+TKTo=
|
||||
|
@ -291,6 +270,7 @@ github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLe
|
|||
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
|
||||
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
|
||||
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
|
||||
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
|
@ -298,8 +278,6 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
|
|||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.0.0-20220520183353-fd19c99a87aa/go.mod h1:17drOmN3MwGY7t0e+Ei9b45FFGA3fBs3x36SsCg1hq8=
|
||||
github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY=
|
||||
github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
|
||||
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
|
||||
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
|
||||
|
@ -308,12 +286,9 @@ github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/Oth
|
|||
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
|
||||
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
|
||||
github.com/googleapis/go-type-adapters v1.0.0/go.mod h1:zHW75FOG2aur7gAO2B+MLby+cLsWGBF62rFAi7WjWO4=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc=
|
||||
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw=
|
||||
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
|
||||
|
@ -321,12 +296,10 @@ github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 h1
|
|||
github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw=
|
||||
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
|
||||
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
|
||||
github.com/ipostelnik/cli/v2 v2.3.1-0.20210324024421-b6ea8234fe3d h1:PRDnysJ9dF1vUMmEzBu6aHQeUluSQy4eWH3RsSSy/vI=
|
||||
github.com/ipostelnik/cli/v2 v2.3.1-0.20210324024421-b6ea8234fe3d/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
|
||||
github.com/jellevandenhooff/dkim v0.0.0-20150330215556-f50fe3d243e1/go.mod h1:E0B/fFc00Y+Rasa88328GlI/XbtyysCtTHZS8h7IrBU=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
|
@ -349,7 +322,6 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN
|
|||
github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/pty v1.1.3/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
|
@ -357,13 +329,6 @@ github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0
|
|||
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
|
||||
github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII=
|
||||
github.com/leodido/go-urn v1.2.1 h1:BqpAaACuzVSgi/VLzGZIobT2z4v53pjosyNd9Yv6n/w=
|
||||
github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI=
|
||||
github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
|
||||
github.com/marten-seemann/qpack v0.2.1/go.mod h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc=
|
||||
github.com/marten-seemann/qtls-go1-16 v0.1.5 h1:o9JrYPPco/Nukd/HpOHMHZoBDXQqoNtUCmny98/1uqQ=
|
||||
github.com/marten-seemann/qtls-go1-16 v0.1.5/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk=
|
||||
github.com/marten-seemann/qtls-go1-17 v0.1.2 h1:JADBlm0LYiVbuSySCHeY863dNkcpMmDR7s0bLKJeYlQ=
|
||||
github.com/marten-seemann/qtls-go1-17 v0.1.2/go.mod h1:C2ekUKcDdz9SDWxec1N/MvcXBpaX9l3Nx67XaR84L5s=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
|
@ -371,7 +336,6 @@ github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peK
|
|||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/microcosm-cc/bluemonday v1.0.1/go.mod h1:hsXNsILzKxV+sX77C5b8FSuKF00vh2OMYv+xgHpAMF4=
|
||||
github.com/miekg/dns v1.1.50 h1:DQUfb9uc6smULcREF09Uc+/Gd46YWqJd5DbpPE9xkcA=
|
||||
github.com/miekg/dns v1.1.50/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME=
|
||||
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
|
||||
|
@ -385,26 +349,12 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G
|
|||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo=
|
||||
github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
|
||||
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E=
|
||||
github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0=
|
||||
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
|
||||
github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY=
|
||||
github.com/onsi/ginkgo/v2 v2.4.0 h1:+Ig9nvqgS5OBSACXNk15PLdp0U9XPYROt9CFzVdFGIs=
|
||||
github.com/onsi/ginkgo/v2 v2.4.0/go.mod h1:iHkDK1fKGcBoEHT5W7YBq4RFWaQulw+caOMkAt4OrFo=
|
||||
github.com/onsi/gomega v1.23.0 h1:/oxKu9c2HVap+F3PfKort2Hw5DEU+HGlW8n+tguWsys=
|
||||
github.com/onsi/gomega v1.23.0/go.mod h1:Z/NWtiqwBrwUt4/2loMmHL63EDLnYHmVbuBpDr2vQAg=
|
||||
github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
|
||||
github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
|
||||
github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8=
|
||||
github.com/pelletier/go-toml/v2 v2.0.5 h1:ipoSadvV8oGUjnUbMub59IDPPwfxF694nG/jwbMiyQg=
|
||||
github.com/philhofer/fwd v1.1.1 h1:GdGcTjf5RNAxwS4QLsiMzJYj5KEvPJD3Abr261yRQXQ=
|
||||
github.com/pingcap/errors v0.11.4 h1:lFuQV/oaUMGcD2tqt+01ROSmJs75VG1ToEOkZIZ4nE4=
|
||||
|
@ -416,7 +366,6 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
|
|||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35 h1:J9b7z+QKAmPf4YLrFg6oQUotqHQeUNWwkvo7jZp1GLU=
|
||||
github.com/pquerna/cachecontrol v0.0.0-20180517163645-1555304b9b35/go.mod h1:prYjPmNq4d1NPVmpShWobRqXY3q7Vp+80DqgxxUrUIA=
|
||||
github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
|
@ -429,14 +378,12 @@ github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:
|
|||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE=
|
||||
github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA=
|
||||
github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
|
@ -449,39 +396,13 @@ github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFR
|
|||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rs/zerolog v1.20.0 h1:38k9hgtUBdxFwE34yS8rTHmHBa4eN16E4DJlv177LNs=
|
||||
github.com/rs/zerolog v1.20.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo=
|
||||
github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g=
|
||||
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
|
||||
github.com/shurcooL/component v0.0.0-20170202220835-f88ec8f54cc4/go.mod h1:XhFIlyj5a1fBNx5aJTbKoIq0mNaPvOagO+HjB3EtxrY=
|
||||
github.com/shurcooL/events v0.0.0-20181021180414-410e4ca65f48/go.mod h1:5u70Mqkb5O5cxEA8nxTsgrgLehJeAw6Oc4Ab1c/P1HM=
|
||||
github.com/shurcooL/github_flavored_markdown v0.0.0-20181002035957-2122de532470/go.mod h1:2dOwnU2uBioM+SGy2aZoq1f/Sd1l9OkAeAUvjSyvgU0=
|
||||
github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk=
|
||||
github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ=
|
||||
github.com/shurcooL/gofontwoff v0.0.0-20180329035133-29b52fc0a18d/go.mod h1:05UtEgK5zq39gLST6uB0cf3NEHjETfB4Fgr3Gx5R9Vw=
|
||||
github.com/shurcooL/gopherjslib v0.0.0-20160914041154-feb6d3990c2c/go.mod h1:8d3azKNyqcHP1GaQE/c6dDgjkgSx2BZ4IoEi4F1reUI=
|
||||
github.com/shurcooL/highlight_diff v0.0.0-20170515013008-09bb4053de1b/go.mod h1:ZpfEhSmds4ytuByIcDnOLkTHGUI6KNqRNPDLHDk+mUU=
|
||||
github.com/shurcooL/highlight_go v0.0.0-20181028180052-98c3abbbae20/go.mod h1:UDKB5a1T23gOMUJrI+uSuH0VRDStOiUVSjBTRDVBVag=
|
||||
github.com/shurcooL/home v0.0.0-20181020052607-80b7ffcb30f9/go.mod h1:+rgNQw2P9ARFAs37qieuu7ohDNQ3gds9msbT2yn85sg=
|
||||
github.com/shurcooL/htmlg v0.0.0-20170918183704-d01228ac9e50/go.mod h1:zPn1wHpTIePGnXSHpsVPWEktKXHr6+SS6x/IKRb7cpw=
|
||||
github.com/shurcooL/httperror v0.0.0-20170206035902-86b7830d14cc/go.mod h1:aYMfkZ6DWSJPJ6c4Wwz3QtW22G7mf/PEgaB9k/ik5+Y=
|
||||
github.com/shurcooL/httpfs v0.0.0-20171119174359-809beceb2371/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg=
|
||||
github.com/shurcooL/httpgzip v0.0.0-20180522190206-b1c53ac65af9/go.mod h1:919LwcH0M7/W4fcZ0/jy0qGght1GIhqyS/EgWGH2j5Q=
|
||||
github.com/shurcooL/issues v0.0.0-20181008053335-6292fdc1e191/go.mod h1:e2qWDig5bLteJ4fwvDAc2NHzqFEthkqn7aOZAOpj+PQ=
|
||||
github.com/shurcooL/issuesapp v0.0.0-20180602232740-048589ce2241/go.mod h1:NPpHK2TI7iSaM0buivtFUc9offApnI0Alt/K8hcHy0I=
|
||||
github.com/shurcooL/notifications v0.0.0-20181007000457-627ab5aea122/go.mod h1:b5uSkrEVM1jQUspwbixRBhaIjIzL2xazXp6kntxYle0=
|
||||
github.com/shurcooL/octicon v0.0.0-20181028054416-fa4f57f9efb2/go.mod h1:eWdoE5JD4R5UVWDucdOPg1g2fqQRq78IQa9zlOV1vpQ=
|
||||
github.com/shurcooL/reactions v0.0.0-20181006231557-f2e0b4ca5b82/go.mod h1:TCR1lToEk4d2s07G3XGfz2QrgHXg4RJBvjrOozvoWfk=
|
||||
github.com/shurcooL/sanitized_anchor_name v0.0.0-20170918181015-86672fcb3f95/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
|
||||
github.com/shurcooL/users v0.0.0-20180125191416-49c67e49c537/go.mod h1:QJTqeLYEDaXHZDBsXlPCDqdhQuJkuw4NOtaxYe3xii4=
|
||||
github.com/shurcooL/webdavfs v0.0.0-20170829043945-18c3829fa133/go.mod h1:hKmq5kWdCj2z2KEozexVbfEZIWiTjhE0+UjmZgPqehw=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/sourcegraph/annotate v0.0.0-20160123013949-f4cad6c6324d/go.mod h1:UdhH50NIW0fCiwBSr0co2m7BnFLdv4fQTgdqdJTHFeE=
|
||||
github.com/sourcegraph/syntaxhighlight v0.0.0-20170531221838-bd320f5d308e/go.mod h1:HuIsMU8RRBOtsCgI77wP899iHVBQpCmg4ErYMZB+2IA=
|
||||
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
|
@ -497,20 +418,16 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
|
|||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/tarm/serial v0.0.0-20180830185346-98f6abe2eb07/go.mod h1:kDXzergiv9cbyO7IOYJZWg1U88JhDg3PB6klq9Hg2pA=
|
||||
github.com/tinylib/msgp v1.1.2 h1:gWmO7n0Ys2RBEb7GPYB9Ujq8Mk5p2U08lRnmMcGy6BQ=
|
||||
github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo=
|
||||
github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw=
|
||||
github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY=
|
||||
github.com/ugorji/go/codec v1.2.7 h1:YPXUKf7fYbp/y8xloBqZOw2qaVggbfwMlI8WM3wZUJ0=
|
||||
github.com/viant/assertly v0.4.8/go.mod h1:aGifi++jvCrUaklKEKT0BU95igDNaqkvz+49uaYMPRU=
|
||||
github.com/viant/toolbox v0.24.0/go.mod h1:OxMCG57V0PXuIP2HNQrtJf2CjqdmbrOx5EkMILuUhzM=
|
||||
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
go.opencensus.io v0.18.0/go.mod h1:vKdFvxhtzZ9onBp9VKHK8z/sRpBMnKAsufL7wlDrCOA=
|
||||
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
|
||||
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
|
||||
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
|
@ -536,17 +453,12 @@ go.opentelemetry.io/proto/otlp v0.15.0 h1:h0bKrvdrT/9sBwEJ6iWUqT/N/xPcS66bL4u3is
|
|||
go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
|
||||
go.uber.org/automaxprocs v1.4.0 h1:CpDZl6aOlLhReez+8S3eEotD7Jx0Os++lemPlMULQP0=
|
||||
go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q=
|
||||
go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE=
|
||||
golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181030102418-4d3f4d9ffa16/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200221231518-2aa609cf4a9d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/crypto v0.8.0 h1:pd9TJtTueMTVQXzk8E2XESSMQDj/U7OUu0PqJqPXQjQ=
|
||||
|
@ -561,9 +473,10 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
|
|||
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
|
||||
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
|
||||
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
|
||||
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db h1:D/cFflL63o2KSLJIwjlcIt8PR064j/xsmdEJL/YvY/o=
|
||||
golang.org/x/exp v0.0.0-20221205204356-47842c84f3db/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
|
||||
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
|
||||
golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
|
@ -591,14 +504,10 @@ golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8=
|
|||
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
|
||||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181029044818-c44066c5c816/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190313220215-9f648a60d977/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
|
@ -617,7 +526,6 @@ golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/
|
|||
golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A=
|
||||
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
|
||||
|
@ -630,7 +538,6 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
|
|||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk=
|
||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210726213435-c6fcb2dbf985/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
|
@ -646,8 +553,6 @@ golang.org/x/net v0.0.0-20220826154423-83b083e8dc8b/go.mod h1:YDH+HFinaLZZlnHAfS
|
|||
golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM=
|
||||
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
@ -670,7 +575,6 @@ golang.org/x/oauth2 v0.0.0-20220608161450-d0670ef3b1eb/go.mod h1:jaDAt6Dkxork7Lm
|
|||
golang.org/x/oauth2 v0.0.0-20220822191816-0ebed06d0094/go.mod h1:h4gKUeWbJ4rQPri7E0u6Gs4e9Ri2zaLxzw5DI5XGrYg=
|
||||
golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
|
||||
golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
|
||||
golang.org/x/perf v0.0.0-20180704124530-6e6d33e29852/go.mod h1:JLpeXjPJfIyPr5TlbXLkXWLhP8nz10XfvxElABhCtcw=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
|
@ -687,12 +591,9 @@ golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o=
|
|||
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181029174526-d69651ed3497/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190316082340-a2f829d7f35f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -700,10 +601,8 @@ golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -719,7 +618,6 @@ golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -729,7 +627,6 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
@ -782,13 +679,10 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
|
||||
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
|
||||
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20181030000716-a0a13e073c7b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
|
||||
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
@ -833,7 +727,6 @@ golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82u
|
|||
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
|
||||
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
@ -851,9 +744,6 @@ golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8T
|
|||
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
golang.org/x/xerrors v0.0.0-20220609144429-65e65417b02f/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
|
||||
google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.0.0-20181030000543-1d582fd0359e/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0=
|
||||
google.golang.org/api v0.1.0/go.mod h1:UGEZY7KEX120AnNLIHFMKIo4obdJhkp2tPbaPlQx13Y=
|
||||
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
|
||||
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
|
||||
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@ -894,8 +784,6 @@ google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6r
|
|||
google.golang.org/api v0.80.0/go.mod h1:xY3nI94gbvBrE0J6NHXhxOmW97HG7Khjkku6AFB3Hyg=
|
||||
google.golang.org/api v0.84.0/go.mod h1:NTsGnUFJMYROtiquksZHBWtHfeMC7iYthki7Eq3pa8o=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
|
||||
google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0=
@ -904,10 +792,6 @@ google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID
|
|||
google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
|
||||
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
|
||||
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181029155118-b69ba1387ce2/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
|
||||
google.golang.org/genproto v0.0.0-20181202183823-bd91e49a0898/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg=
|
||||
google.golang.org/genproto v0.0.0-20190306203927-b5d61aea6440/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
|
||||
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@ -987,9 +871,6 @@ google.golang.org/genproto v0.0.0-20220608133413-ed9918b62aac/go.mod h1:KEWEmljW
|
|||
google.golang.org/genproto v0.0.0-20220616135557-88e70c0c3a90/go.mod h1:KEWEmljWE5zPzLBa/oHl6DaEt9LmfH6WtH1OHIvleBA=
|
||||
google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd h1:OjndDrsik+Gt+e6fs45z9AxiewiKyLKYpA45W5Kpkks=
|
||||
google.golang.org/genproto v0.0.0-20221202195650-67e5cbc046fd/go.mod h1:cTsE614GARnxrLsqKREzmNYJACSWWpAWdNMwnD7c2BE=
|
||||
google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio=
|
||||
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@ -1050,14 +931,10 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV
|
|||
gopkg.in/coreos/go-oidc.v2 v2.2.1 h1:MY5SZClJ7vhjKfr64a4nHAOV/c3WH2gB9BMrR64J1Mc=
|
||||
gopkg.in/coreos/go-oidc.v2 v2.2.1/go.mod h1:fYaTe2FS96wZZwR17YTDHwG+Mw6fmyqJNxN2eNCGPCI=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
|
||||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI=
|
||||
gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@ -1069,8 +946,6 @@ gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
|||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
|
||||
honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@ -1083,7 +958,5 @@ nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0
|
|||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sourcegraph.com/sourcegraph/go-diff v0.5.0/go.mod h1:kuch7UrkMzY0X+p9CRK03kfuPQ2zzQcaEFbx8wA8rck=
|
||||
sourcegraph.com/sqs/pbtypes v0.0.0-20180604144634-d3ebe8f20ae4/go.mod h1:ketZ/q3QxT9HOBeFhu6RdvsftgpsbFHBF5Cas6cDKZ0=
|
||||
zombiezen.com/go/capnproto2 v2.18.0+incompatible h1:mwfXZniffG5mXokQGHUJWGnqIBggoPfT/CEwon9Yess=
|
||||
zombiezen.com/go/capnproto2 v2.18.0+incompatible/go.mod h1:XO5Pr2SbXgqZwn0m0Ru54QBqpOf4K5AYBO+8LAOBQEQ=

@ -4,7 +4,7 @@ import (
"strconv"
"time"

"github.com/lucas-clemente/quic-go/logging"
"github.com/quic-go/quic-go/logging"
)

func perspectiveString(p logging.Perspective) string {

@ -28,7 +28,7 @@ func durationToPromGauge(duration time.Duration) float64 {
return float64(duration.Milliseconds())
}

// Helper to convert https://pkg.go.dev/github.com/lucas-clemente/quic-go@v0.23.0/logging#PacketType into string
// Helper to convert https://pkg.go.dev/github.com/quic-go/quic-go@v0.23.0/logging#PacketType into string
func packetTypeString(pt logging.PacketType) string {
switch pt {
case logging.PacketTypeInitial:

@ -52,7 +52,7 @@ func packetTypeString(pt logging.PacketType) string {
}
}

// Helper to convert https://pkg.go.dev/github.com/lucas-clemente/quic-go@v0.23.0/logging#PacketDropReason into string
// Helper to convert https://pkg.go.dev/github.com/quic-go/quic-go@v0.23.0/logging#PacketDropReason into string
func packetDropReasonString(reason logging.PacketDropReason) string {
switch reason {
case logging.PacketDropKeyUnavailable:

@ -82,7 +82,7 @@ func packetDropReasonString(reason logging.PacketDropReason) string {
}
}

// Helper to convert https://pkg.go.dev/github.com/lucas-clemente/quic-go@v0.23.0/logging#PacketLossReason into string
// Helper to convert https://pkg.go.dev/github.com/quic-go/quic-go@v0.23.0/logging#PacketLossReason into string
func packetLossReasonString(reason logging.PacketLossReason) string {
switch reason {
case logging.PacketLossReorderingThreshold:

@ -5,8 +5,8 @@ import (
"fmt"

"github.com/google/uuid"
"github.com/lucas-clemente/quic-go"
"github.com/pkg/errors"
"github.com/quic-go/quic-go"
"github.com/rs/zerolog"

"github.com/cloudflare/cloudflared/packet"

@ -16,7 +16,7 @@ import (

"github.com/google/gopacket/layers"
"github.com/google/uuid"
"github.com/lucas-clemente/quic-go"
"github.com/quic-go/quic-go"
"github.com/rs/zerolog"
"github.com/stretchr/testify/require"
"golang.org/x/net/icmp"

@ -180,8 +182,10 @@ func testDatagram(t *testing.T, version uint8, sessionToPayloads []*packet.Sessi
InsecureSkipVerify: true,
NextProtos: []string{"argotunnel"},
}
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
// Establish quic connection
quicSession, err := quic.DialAddrEarly(quicListener.Addr().String(), tlsClientConfig, quicConfig)
quicSession, err := quic.DialAddrEarly(ctx, quicListener.Addr().String(), tlsClientConfig, quicConfig)
require.NoError(t, err)
defer quicSession.CloseWithError(0, "")

@ -264,7 +266,7 @@ func validateTracingSpans(t *testing.T, receivedPacket Packet, expectedSpan *Tra
require.Equal(t, tracingSpans, expectedSpan)
}

func newQUICListener(t *testing.T, config *quic.Config) quic.Listener {
func newQUICListener(t *testing.T, config *quic.Config) *quic.Listener {
// Create a simple tls config.
tlsConfig := generateTLSConfig()

@ -4,8 +4,8 @@ import (
"context"
"fmt"

"github.com/lucas-clemente/quic-go"
"github.com/pkg/errors"
"github.com/quic-go/quic-go"
"github.com/rs/zerolog"

"github.com/cloudflare/cloudflared/packet"

@ -3,8 +3,8 @@ package quic
import (
"sync"

"github.com/lucas-clemente/quic-go/logging"
"github.com/prometheus/client_golang/prometheus"
"github.com/quic-go/quic-go/logging"
)

const (

@ -3,9 +3,9 @@
package quic

const (
// Due to https://github.com/lucas-clemente/quic-go/issues/3273, MTU discovery is disabled on Windows
// 1220 is the default value https://github.com/lucas-clemente/quic-go/blob/84e03e59760ceee37359688871bb0688fcc4e98f/internal/protocol/params.go#L138
// Due to https://github.com/quic-go/quic-go/issues/3273, MTU discovery is disabled on Windows
// 1220 is the default value https://github.com/quic-go/quic-go/blob/84e03e59760ceee37359688871bb0688fcc4e98f/internal/protocol/params.go#L138
MaxDatagramFrameSize = 1220
// 3 more bytes are reserved at https://github.com/lucas-clemente/quic-go/blob/v0.24.0/internal/wire/datagram_frame.go#L61
// 3 more bytes are reserved at https://github.com/quic-go/quic-go/blob/v0.24.0/internal/wire/datagram_frame.go#L61
maxDatagramPayloadSize = MaxDatagramFrameSize - 3 - sessionIDLen - typeIDLen
)
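
Note: the constants above cap how much tunneled payload fits into a single QUIC datagram when MTU discovery is off. A rough, self-contained sketch of the arithmetic follows, assuming a 16-byte session ID and a 1-byte type ID; those two lengths are illustrative assumptions, not values taken from this diff.

```go
package main

import "fmt"

const (
	maxDatagramFrameSize = 1220 // quic-go default when MTU discovery is disabled (Windows)
	frameOverhead        = 3    // bytes reserved by quic-go's datagram frame header
	sessionIDLen         = 16   // assumed: UUID-sized session identifier
	typeIDLen            = 1    // assumed: one-byte datagram type marker
)

func main() {
	payload := maxDatagramFrameSize - frameOverhead - sessionIDLen - typeIDLen
	fmt.Println("max datagram payload on Windows:", payload) // prints 1200
}
```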

@ -4,7 +4,7 @@ import (
"sync"
"time"

"github.com/lucas-clemente/quic-go"
"github.com/quic-go/quic-go"
)

type SafeStreamCloser struct {

@ -9,7 +9,7 @@ import (
"testing"
"time"

"github.com/lucas-clemente/quic-go"
"github.com/quic-go/quic-go"
"github.com/stretchr/testify/require"
)

@ -56,7 +56,9 @@ func quicClient(t *testing.T, addr net.Addr) {
InsecureSkipVerify: true,
NextProtos: []string{"argotunnel"},
}
session, err := quic.DialAddr(addr.String(), tlsConf, testQUICConfig)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
session, err := quic.DialAddr(ctx, addr.String(), tlsConf, testQUICConfig)
require.NoError(t, err)

var wg sync.WaitGroup
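
Note: both test changes above follow the same pattern — the new quic-go makes every dial context-aware, so the tests create a cancellable context before dialing. A minimal sketch of the updated client-side call shape, assuming a quic-go version with the context-first API; the address and TLS settings are illustrative only.

```go
package main

import (
	"context"
	"crypto/tls"
	"log"
	"time"

	"github.com/quic-go/quic-go"
)

func main() {
	// The context is now the first argument; cancelling it aborts the handshake.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	tlsConf := &tls.Config{InsecureSkipVerify: true, NextProtos: []string{"argotunnel"}}
	conf := &quic.Config{EnableDatagrams: true, KeepAlivePeriod: 5 * time.Second}

	conn, err := quic.DialAddr(ctx, "127.0.0.1:7844", tlsConf, conf) // illustrative address
	if err != nil {
		log.Fatal(err)
	}
	defer conn.CloseWithError(0, "done")
}
```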

@ -5,7 +5,7 @@ import (
"net"
"time"

"github.com/lucas-clemente/quic-go/logging"
"github.com/quic-go/quic-go/logging"
"github.com/rs/zerolog"
)

@ -21,14 +21,15 @@ type tracerConfig struct {
index uint8
}

func NewClientTracer(logger *zerolog.Logger, index uint8) logging.Tracer {
return &tracer{
func NewClientTracer(logger *zerolog.Logger, index uint8) func(context.Context, logging.Perspective, logging.ConnectionID) logging.ConnectionTracer {
t := &tracer{
logger: logger,
config: &tracerConfig{
isClient: true,
index: index,
},
}
return t.TracerForConnection
}

func NewServerTracer(logger *zerolog.Logger) logging.Tracer {

@ -47,7 +48,10 @@ func (t *tracer) TracerForConnection(_ctx context.Context, _p logging.Perspectiv
return newConnTracer(newServiceCollector())
}

func (*tracer) SentPacket(net.Addr, *logging.Header, logging.ByteCount, []logging.Frame) {}
func (*tracer) SentPacket(net.Addr, *logging.Header, logging.ByteCount, []logging.Frame) {
}
func (*tracer) SentVersionNegotiationPacket(_ net.Addr, dest, src logging.ArbitraryLenConnectionID, _ []logging.VersionNumber) {
}
func (*tracer) DroppedPacket(net.Addr, logging.PacketType, logging.ByteCount, logging.PacketDropReason) {
}

@ -82,7 +86,7 @@ func (ct *connTracer) ReceivedPacket(hdr *logging.ExtendedHeader, size logging.B
ct.metricsCollector.receivedPackets(size)
}

func (ct *connTracer) BufferedPacket(pt logging.PacketType) {
func (ct *connTracer) BufferedPacket(pt logging.PacketType, size logging.ByteCount) {
ct.metricsCollector.bufferedPackets(pt)
}

@ -110,12 +114,24 @@ func (ct *connTracer) ReceivedTransportParameters(parameters *logging.TransportP
func (ct *connTracer) RestoredTransportParameters(parameters *logging.TransportParameters) {
}

func (ct *connTracer) ReceivedVersionNegotiationPacket(header *logging.Header, numbers []logging.VersionNumber) {
func (ct *connTracer) SentLongHeaderPacket(hdr *logging.ExtendedHeader, size logging.ByteCount, ack *logging.AckFrame, frames []logging.Frame) {
}

func (ct *connTracer) SentShortHeaderPacket(hdr *logging.ShortHeader, size logging.ByteCount, ack *logging.AckFrame, frames []logging.Frame) {
}

func (ct *connTracer) ReceivedVersionNegotiationPacket(dest, src logging.ArbitraryLenConnectionID, _ []logging.VersionNumber) {
}

func (ct *connTracer) ReceivedRetry(header *logging.Header) {
}

func (ct *connTracer) ReceivedLongHeaderPacket(hdr *logging.ExtendedHeader, size logging.ByteCount, frames []logging.Frame) {
}

func (ct *connTracer) ReceivedShortHeaderPacket(hdr *logging.ShortHeader, size logging.ByteCount, frames []logging.Frame) {
}

func (ct *connTracer) AcknowledgedPacket(level logging.EncryptionLevel, number logging.PacketNumber) {
}
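
Note: with the new quic-go, cloudflared no longer registers a global client logging.Tracer; NewClientTracer now returns a per-connection tracer constructor. A sketch of how that return value might be wired up, assuming this quic-go version's Config.Tracer field accepts exactly that function type; the cfdquic alias is illustrative.

```go
package main

import (
	"github.com/quic-go/quic-go"
	"github.com/rs/zerolog"

	cfdquic "github.com/cloudflare/cloudflared/quic"
)

func main() {
	logger := zerolog.Nop()
	// Assumption: Config.Tracer takes a
	// func(context.Context, logging.Perspective, logging.ConnectionID) logging.ConnectionTracer,
	// matching the new NewClientTracer return type above.
	cfg := &quic.Config{
		EnableDatagrams: true,
		Tracer:          cfdquic.NewClientTracer(&logger, 0), // 0 = connection index
	}
	_ = cfg
}
```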

@ -7,7 +7,7 @@ import (
"strings"
"time"

"github.com/lucas-clemente/quic-go"
"github.com/quic-go/quic-go"
"github.com/rs/zerolog"

"github.com/cloudflare/cloudflared/connection"

@ -11,8 +11,8 @@ import (
"time"

"github.com/google/uuid"
"github.com/lucas-clemente/quic-go"
"github.com/pkg/errors"
"github.com/quic-go/quic-go"
"github.com/rs/zerolog"
"golang.org/x/sync/errgroup"

@ -605,6 +605,7 @@ func (e *EdgeTunnelServer) serveQUIC(
}

quicConn, err := connection.NewQUICConnection(
ctx,
quicConfig,
edgeAddr,
e.edgeBindAddr,
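
Note: serveQUIC now threads its context into connection.NewQUICConnection, so cancelling the supervisor aborts the edge dial instead of waiting for a timeout. A toy sketch of that behaviour; dialEdge is a stand-in name, not cloudflared code.

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// dialEdge stands in for the context-aware dial inside NewQUICConnection.
func dialEdge(ctx context.Context) error {
	select {
	case <-time.After(10 * time.Second): // pretend the edge is unreachable
		return nil
	case <-ctx.Done():
		return ctx.Err() // the caller's cancellation now reaches the dial
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 100*time.Millisecond)
	defer cancel()
	fmt.Println(dialEdge(ctx)) // prints: context deadline exceeded
}
```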

@ -4,7 +4,7 @@ import (
"testing"
"time"

"github.com/lucas-clemente/quic-go"
"github.com/quic-go/quic-go"
"github.com/rs/zerolog"
"github.com/stretchr/testify/assert"

@ -1,26 +0,0 @@
|
|||
# Compiled Object files, Static and Dynamic libs (Shared Objects)
|
||||
*.o
|
||||
*.a
|
||||
*.so
|
||||
|
||||
# Folders
|
||||
_obj
|
||||
_test
|
||||
|
||||
# Architecture specific extensions/prefixes
|
||||
*.[568vq]
|
||||
[568vq].out
|
||||
|
||||
*.cgo1.go
|
||||
*.cgo2.c
|
||||
_cgo_defun.c
|
||||
_cgo_gotypes.go
|
||||
_cgo_export.*
|
||||
|
||||
_testmain.go
|
||||
|
||||
*.exe
|
||||
*.test
|
||||
*.prof
|
||||
|
||||
genny
|
|
@ -1,6 +0,0 @@
|
|||
language: go
|
||||
|
||||
go:
|
||||
- 1.7
|
||||
- 1.8
|
||||
- 1.9
|
|
@ -1,22 +0,0 @@
|
|||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 cheekybits
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
|
|
@ -1,245 +0,0 @@
|
|||
# genny - Generics for Go
|
||||
|
||||
[![Build Status](https://travis-ci.org/cheekybits/genny.svg?branch=master)](https://travis-ci.org/cheekybits/genny) [![GoDoc](https://godoc.org/github.com/cheekybits/genny/parse?status.png)](http://godoc.org/github.com/cheekybits/genny/parse)
|
||||
|
||||
Install:
|
||||
|
||||
```
|
||||
go get github.com/cheekybits/genny
|
||||
```
|
||||
|
||||
=====
|
||||
|
||||
(pron. Jenny) by Mat Ryer ([@matryer](https://twitter.com/matryer)) and Tyler Bunnell ([@TylerJBunnell](https://twitter.com/TylerJBunnell)).
|
||||
|
||||
Until the Go core team include support for [generics in Go](http://golang.org/doc/faq#generics), `genny` is a code-generation generics solution. It allows you write normal buildable and testable Go code which, when processed by the `genny gen` tool, will replace the generics with specific types.
|
||||
|
||||
* Generic code is valid Go code
|
||||
* Generic code compiles and can be tested
|
||||
* Use `stdin` and `stdout` or specify in and out files
|
||||
* Supports Go 1.4's [go generate](http://tip.golang.org/doc/go1.4#gogenerate)
|
||||
* Multiple specific types will generate every permutation
|
||||
* Use `BUILTINS` and `NUMBERS` wildtype to generate specific code for all built-in (and number) Go types
|
||||
* Function names and comments also get updated
|
||||
|
||||
## Library
|
||||
|
||||
We have started building a [library of common things](https://github.com/cheekybits/gennylib), and you can use `genny get` to generate the specific versions you need.
|
||||
|
||||
For example: `genny get maps/concurrentmap.go "KeyType=BUILTINS ValueType=BUILTINS"` will print out generated code for all types for a concurrent map. Any file in the library may be generated locally in this way using all the same options given to `genny gen`.
|
||||
|
||||
## Usage
|
||||
|
||||
```
|
||||
genny [{flags}] gen "{types}"
|
||||
|
||||
gen - generates type specific code from generic code.
|
||||
get <package/file> - fetch a generic template from the online library and gen it.
|
||||
|
||||
{flags} - (optional) Command line flags (see below)
|
||||
{types} - (required) Specific types for each generic type in the source
|
||||
{types} format: {generic}={specific}[,another][ {generic2}={specific2}]
|
||||
|
||||
Examples:
|
||||
Generic=Specific
|
||||
Generic1=Specific1 Generic2=Specific2
|
||||
Generic1=Specific1,Specific2 Generic2=Specific3,Specific4
|
||||
|
||||
Flags:
|
||||
-in="": file to parse instead of stdin
|
||||
-out="": file to save output to instead of stdout
|
||||
-pkg="": package name for generated files
|
||||
```
|
||||
|
||||
* Comma separated type lists will generate code for each type
|
||||
|
||||
### Flags
|
||||
|
||||
* `-in` - specify the input file (rather than using stdin)
|
||||
* `-out` - specify the output file (rather than using stdout)
|
||||
|
||||
### go generate
|
||||
|
||||
To use Go 1.4's `go generate` capability, insert the following comment in your source code file:
|
||||
|
||||
```
|
||||
//go:generate genny -in=$GOFILE -out=gen-$GOFILE gen "KeyType=string,int ValueType=string,int"
|
||||
```
|
||||
|
||||
* Start the line with `//go:generate `
|
||||
* Use the `-in` and `-out` flags to specify the files to work on
|
||||
* Use the `genny` command as usual after the flags
|
||||
|
||||
Now, running `go generate` (in a shell) for the package will cause the generic versions of the files to be generated.
|
||||
|
||||
* The output file will be overwritten, so it's safe to call `go generate` many times
|
||||
* Use `$GOFILE` to refer to the current file
|
||||
* The `//go:generate` line will be removed from the output
|
||||
|
||||
To see a real example of how to use `genny` with `go generate`, look in the [example/go-generate directory](https://github.com/cheekybits/genny/tree/master/examples/go-generate).
|
||||
|
||||
## How it works
|
||||
|
||||
Define your generic types using the special `generic.Type` placeholder type:
|
||||
|
||||
```go
|
||||
type KeyType generic.Type
|
||||
type ValueType generic.Type
|
||||
```
|
||||
|
||||
* You can use as many as you like
|
||||
* Give them meaningful names
|
||||
|
||||
Then write the generic code referencing the types as your normally would:
|
||||
|
||||
```go
|
||||
func SetValueTypeForKeyType(key KeyType, value ValueType) { /* ... */ }
|
||||
```
|
||||
|
||||
* Generic type names will also be replaced in comments and function names (see Real example below)
|
||||
|
||||
Since `generic.Type` is a real Go type, your code will compile, and you can even write unit tests against your generic code.
|
||||
|
||||
#### Generating specific versions
|
||||
|
||||
Pass the file through the `genny gen` tool with the specific types as the argument:
|
||||
|
||||
```
|
||||
cat generic.go | genny gen "KeyType=string ValueType=interface{}"
|
||||
```
|
||||
|
||||
The output will be the complete Go source file with the generic types replaced with the types specified in the arguments.
|
||||
|
||||
## Real example
|
||||
|
||||
Given [this generic Go code](https://github.com/cheekybits/genny/tree/master/examples/queue) which compiles and is tested:
|
||||
|
||||
```go
|
||||
package queue
|
||||
|
||||
import "github.com/cheekybits/genny/generic"
|
||||
|
||||
// NOTE: this is how easy it is to define a generic type
|
||||
type Something generic.Type
|
||||
|
||||
// SomethingQueue is a queue of Somethings.
|
||||
type SomethingQueue struct {
|
||||
items []Something
|
||||
}
|
||||
|
||||
func NewSomethingQueue() *SomethingQueue {
|
||||
return &SomethingQueue{items: make([]Something, 0)}
|
||||
}
|
||||
func (q *SomethingQueue) Push(item Something) {
|
||||
q.items = append(q.items, item)
|
||||
}
|
||||
func (q *SomethingQueue) Pop() Something {
|
||||
item := q.items[0]
|
||||
q.items = q.items[1:]
|
||||
return item
|
||||
}
|
||||
```
|
||||
|
||||
When `genny gen` is invoked like this:
|
||||
|
||||
```
|
||||
cat source.go | genny gen "Something=string"
|
||||
```
|
||||
|
||||
It outputs:
|
||||
|
||||
```go
|
||||
// This file was automatically generated by genny.
|
||||
// Any changes will be lost if this file is regenerated.
|
||||
// see https://github.com/cheekybits/genny
|
||||
|
||||
package queue
|
||||
|
||||
// StringQueue is a queue of Strings.
|
||||
type StringQueue struct {
|
||||
items []string
|
||||
}
|
||||
|
||||
func NewStringQueue() *StringQueue {
|
||||
return &StringQueue{items: make([]string, 0)}
|
||||
}
|
||||
func (q *StringQueue) Push(item string) {
|
||||
q.items = append(q.items, item)
|
||||
}
|
||||
func (q *StringQueue) Pop() string {
|
||||
item := q.items[0]
|
||||
q.items = q.items[1:]
|
||||
return item
|
||||
}
|
||||
```
|
||||
|
||||
To get a _something_ for every built-in Go type plus one of your own types, you could run:
|
||||
|
||||
```
|
||||
cat source.go | genny gen "Something=BUILTINS,*MyType"
|
||||
```
|
||||
|
||||
#### More examples
|
||||
|
||||
Check out the [test code files](https://github.com/cheekybits/genny/tree/master/parse/test) for more real examples.
|
||||
|
||||
## Writing test code
|
||||
|
||||
Once you have defined a generic type with some code worth testing:
|
||||
|
||||
```go
|
||||
package slice
|
||||
|
||||
import (
|
||||
"log"
|
||||
"reflect"
|
||||
|
||||
"github.com/stretchr/gogen/generic"
|
||||
)
|
||||
|
||||
type MyType generic.Type
|
||||
|
||||
func EnsureMyTypeSlice(objectOrSlice interface{}) []MyType {
|
||||
log.Printf("%v", reflect.TypeOf(objectOrSlice))
|
||||
switch obj := objectOrSlice.(type) {
|
||||
case []MyType:
|
||||
log.Println(" returning it untouched")
|
||||
return obj
|
||||
case MyType:
|
||||
log.Println(" wrapping in slice")
|
||||
return []MyType{obj}
|
||||
default:
|
||||
panic("ensure slice needs MyType or []MyType")
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
You can treat it like any normal Go type in your test code:
|
||||
|
||||
```go
|
||||
func TestEnsureMyTypeSlice(t *testing.T) {
|
||||
|
||||
myType := new(MyType)
|
||||
slice := EnsureMyTypeSlice(myType)
|
||||
if assert.NotNil(t, slice) {
|
||||
assert.Equal(t, slice[0], myType)
|
||||
}
|
||||
|
||||
slice = EnsureMyTypeSlice(slice)
|
||||
log.Printf("%#v", slice[0])
|
||||
if assert.NotNil(t, slice) {
|
||||
assert.Equal(t, slice[0], myType)
|
||||
}
|
||||
|
||||
}
|
||||
```
|
||||
|
||||
### Understanding what `generic.Type` is
|
||||
|
||||
Because `generic.Type` is an empty interface type (literally `interface{}`) every other type will be considered to be a `generic.Type` if you are switching on the type of an object. Of course, once the specific versions are generated, this issue goes away but it's worth knowing when you are writing your tests against generic code.
|
||||
|
||||
### Contributions
|
||||
|
||||
* See the [API documentation for the parse package](http://godoc.org/github.com/cheekybits/genny/parse)
|
||||
* Please do TDD
|
||||
* All input welcome
|
|
@ -1,2 +0,0 @@
|
|||
// Package main is the command line tool for Genny.
|
||||
package main
|
|
@ -1,2 +0,0 @@
|
|||
// Package generic contains the generic marker types.
|
||||
package generic
|
|
@ -1,13 +0,0 @@
|
|||
package generic
|
||||
|
||||
// Type is the placeholder type that indicates a generic value.
|
||||
// When genny is executed, variables of this type will be replaced with
|
||||
// references to the specific types.
|
||||
// var GenericType generic.Type
|
||||
type Type interface{}
|
||||
|
||||
// Number is the placehoder type that indiccates a generic numerical value.
|
||||
// When genny is executed, variables of this type will be replaced with
|
||||
// references to the specific types.
|
||||
// var GenericType generic.Number
|
||||
type Number float64
|
|
@ -1,154 +0,0 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"flag"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/cheekybits/genny/out"
|
||||
"github.com/cheekybits/genny/parse"
|
||||
)
|
||||
|
||||
/*
|
||||
|
||||
source | genny gen [-in=""] [-out=""] [-pkg=""] "KeyType=string,int ValueType=string,int"
|
||||
|
||||
*/
|
||||
|
||||
const (
|
||||
_ = iota
|
||||
exitcodeInvalidArgs
|
||||
exitcodeInvalidTypeSet
|
||||
exitcodeStdinFailed
|
||||
exitcodeGenFailed
|
||||
exitcodeGetFailed
|
||||
exitcodeSourceFileInvalid
|
||||
exitcodeDestFileFailed
|
||||
)
|
||||
|
||||
func main() {
|
||||
var (
|
||||
in = flag.String("in", "", "file to parse instead of stdin")
|
||||
out = flag.String("out", "", "file to save output to instead of stdout")
|
||||
pkgName = flag.String("pkg", "", "package name for generated files")
|
||||
prefix = "https://github.com/metabition/gennylib/raw/master/"
|
||||
)
|
||||
flag.Parse()
|
||||
args := flag.Args()
|
||||
|
||||
if len(args) < 2 {
|
||||
usage()
|
||||
os.Exit(exitcodeInvalidArgs)
|
||||
}
|
||||
|
||||
if strings.ToLower(args[0]) != "gen" && strings.ToLower(args[0]) != "get" {
|
||||
usage()
|
||||
os.Exit(exitcodeInvalidArgs)
|
||||
}
|
||||
|
||||
// parse the typesets
|
||||
var setsArg = args[1]
|
||||
if strings.ToLower(args[0]) == "get" {
|
||||
setsArg = args[2]
|
||||
}
|
||||
typeSets, err := parse.TypeSet(setsArg)
|
||||
if err != nil {
|
||||
fatal(exitcodeInvalidTypeSet, err)
|
||||
}
|
||||
|
||||
outWriter := newWriter(*out)
|
||||
|
||||
if strings.ToLower(args[0]) == "get" {
|
||||
if len(args) != 3 {
|
||||
fmt.Println("not enough arguments to get")
|
||||
usage()
|
||||
os.Exit(exitcodeInvalidArgs)
|
||||
}
|
||||
r, err := http.Get(prefix + args[1])
|
||||
if err != nil {
|
||||
fatal(exitcodeGetFailed, err)
|
||||
}
|
||||
b, err := ioutil.ReadAll(r.Body)
|
||||
if err != nil {
|
||||
fatal(exitcodeGetFailed, err)
|
||||
}
|
||||
r.Body.Close()
|
||||
br := bytes.NewReader(b)
|
||||
err = gen(*in, *pkgName, br, typeSets, outWriter)
|
||||
} else if len(*in) > 0 {
|
||||
var file *os.File
|
||||
file, err = os.Open(*in)
|
||||
if err != nil {
|
||||
fatal(exitcodeSourceFileInvalid, err)
|
||||
}
|
||||
defer file.Close()
|
||||
err = gen(*in, *pkgName, file, typeSets, outWriter)
|
||||
} else {
|
||||
var source []byte
|
||||
source, err = ioutil.ReadAll(os.Stdin)
|
||||
if err != nil {
|
||||
fatal(exitcodeStdinFailed, err)
|
||||
}
|
||||
reader := bytes.NewReader(source)
|
||||
err = gen("stdin", *pkgName, reader, typeSets, outWriter)
|
||||
}
|
||||
|
||||
// do the work
|
||||
if err != nil {
|
||||
fatal(exitcodeGenFailed, err)
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func usage() {
|
||||
fmt.Fprintln(os.Stderr, `usage: genny [{flags}] gen "{types}"
|
||||
|
||||
gen - generates type specific code from generic code.
|
||||
get <package/file> - fetch a generic template from the online library and gen it.
|
||||
|
||||
{flags} - (optional) Command line flags (see below)
|
||||
{types} - (required) Specific types for each generic type in the source
|
||||
{types} format: {generic}={specific}[,another][ {generic2}={specific2}]
|
||||
|
||||
Examples:
|
||||
Generic=Specific
|
||||
Generic1=Specific1 Generic2=Specific2
|
||||
Generic1=Specific1,Specific2 Generic2=Specific3,Specific4
|
||||
|
||||
Flags:`)
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
|
||||
func newWriter(fileName string) io.Writer {
|
||||
if fileName == "" {
|
||||
return os.Stdout
|
||||
}
|
||||
lf := &out.LazyFile{FileName: fileName}
|
||||
defer lf.Close()
|
||||
return lf
|
||||
}
|
||||
|
||||
func fatal(code int, a ...interface{}) {
|
||||
fmt.Println(a...)
|
||||
os.Exit(code)
|
||||
}
|
||||
|
||||
// gen performs the generic generation.
|
||||
func gen(filename, pkgName string, in io.ReadSeeker, typesets []map[string]string, out io.Writer) error {
|
||||
|
||||
var output []byte
|
||||
var err error
|
||||
|
||||
output, err = parse.Generics(filename, pkgName, in, typesets)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
out.Write(output)
|
||||
return nil
|
||||
}
|
|
@ -1,38 +0,0 @@
|
|||
package out
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path"
|
||||
)
|
||||
|
||||
// LazyFile is an io.WriteCloser which defers creation of the file it is supposed to write in
|
||||
// till the first call to its write function in order to prevent creation of file, if no write
|
||||
// is supposed to happen.
|
||||
type LazyFile struct {
|
||||
// FileName is path to the file to which genny will write.
|
||||
FileName string
|
||||
file *os.File
|
||||
}
|
||||
|
||||
// Close closes the file if it is created. Returns nil if no file is created.
|
||||
func (lw *LazyFile) Close() error {
|
||||
if lw.file != nil {
|
||||
return lw.file.Close()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes to the specified file and creates the file first time it is called.
|
||||
func (lw *LazyFile) Write(p []byte) (int, error) {
|
||||
if lw.file == nil {
|
||||
err := os.MkdirAll(path.Dir(lw.FileName), 0755)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
lw.file, err = os.Create(lw.FileName)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
return lw.file.Write(p)
|
||||
}
|
|
@ -1,41 +0,0 @@
|
|||
package parse
|
||||
|
||||
// Builtins contains a slice of all built-in Go types.
|
||||
var Builtins = []string{
|
||||
"bool",
|
||||
"byte",
|
||||
"complex128",
|
||||
"complex64",
|
||||
"error",
|
||||
"float32",
|
||||
"float64",
|
||||
"int",
|
||||
"int16",
|
||||
"int32",
|
||||
"int64",
|
||||
"int8",
|
||||
"rune",
|
||||
"string",
|
||||
"uint",
|
||||
"uint16",
|
||||
"uint32",
|
||||
"uint64",
|
||||
"uint8",
|
||||
"uintptr",
|
||||
}
|
||||
|
||||
// Numbers contains a slice of all built-in number types.
|
||||
var Numbers = []string{
|
||||
"float32",
|
||||
"float64",
|
||||
"int",
|
||||
"int16",
|
||||
"int32",
|
||||
"int64",
|
||||
"int8",
|
||||
"uint",
|
||||
"uint16",
|
||||
"uint32",
|
||||
"uint64",
|
||||
"uint8",
|
||||
}
|
|
@ -1,14 +0,0 @@
|
|||
// Package parse contains the generic code generation capabilities
|
||||
// that power genny.
|
||||
//
|
||||
// genny gen "{types}"
|
||||
//
|
||||
// gen - generates type specific code (to stdout) from generic code (via stdin)
|
||||
//
|
||||
// {types} - (required) Specific types for each generic type in the source
|
||||
// {types} format: {generic}={specific}[,another][ {generic2}={specific2}]
|
||||
// Examples:
|
||||
// Generic=Specific
|
||||
// Generic1=Specific1 Generic2=Specific2
|
||||
// Generic1=Specific1,Specific2 Generic2=Specific3,Specific4
|
||||
package parse
|
|
@ -1,47 +0,0 @@
|
|||
package parse
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
// errMissingSpecificType represents an error when a generic type is not
|
||||
// satisfied by a specific type.
|
||||
type errMissingSpecificType struct {
|
||||
GenericType string
|
||||
}
|
||||
|
||||
// Error gets a human readable string describing this error.
|
||||
func (e errMissingSpecificType) Error() string {
|
||||
return "Missing specific type for '" + e.GenericType + "' generic type"
|
||||
}
|
||||
|
||||
// errImports represents an error from goimports.
|
||||
type errImports struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
// Error gets a human readable string describing this error.
|
||||
func (e errImports) Error() string {
|
||||
return "Failed to goimports the generated code: " + e.Err.Error()
|
||||
}
|
||||
|
||||
// errSource represents an error with the source file.
|
||||
type errSource struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
// Error gets a human readable string describing this error.
|
||||
func (e errSource) Error() string {
|
||||
return "Failed to parse source file: " + e.Err.Error()
|
||||
}
|
||||
|
||||
type errBadTypeArgs struct {
|
||||
Message string
|
||||
Arg string
|
||||
}
|
||||
|
||||
func (e errBadTypeArgs) Error() string {
|
||||
return "\"" + e.Arg + "\" is bad: " + e.Message
|
||||
}
|
||||
|
||||
var errMissingTypeInformation = errors.New("No type arguments were specified and no \"// +gogen\" tag was found in the source.")
|
|
@ -1,298 +0,0 @@
|
|||
package parse
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/parser"
|
||||
"go/scanner"
|
||||
"go/token"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"golang.org/x/tools/imports"
|
||||
)
|
||||
|
||||
var header = []byte(`
|
||||
|
||||
// This file was automatically generated by genny.
|
||||
// Any changes will be lost if this file is regenerated.
|
||||
// see https://github.com/cheekybits/genny
|
||||
|
||||
`)
|
||||
|
||||
var (
|
||||
packageKeyword = []byte("package")
|
||||
importKeyword = []byte("import")
|
||||
openBrace = []byte("(")
|
||||
closeBrace = []byte(")")
|
||||
genericPackage = "generic"
|
||||
genericType = "generic.Type"
|
||||
genericNumber = "generic.Number"
|
||||
linefeed = "\r\n"
|
||||
)
|
||||
var unwantedLinePrefixes = [][]byte{
|
||||
[]byte("//go:generate genny "),
|
||||
}
|
||||
|
||||
func subIntoLiteral(lit, typeTemplate, specificType string) string {
|
||||
if lit == typeTemplate {
|
||||
return specificType
|
||||
}
|
||||
if !strings.Contains(lit, typeTemplate) {
|
||||
return lit
|
||||
}
|
||||
specificLg := wordify(specificType, true)
|
||||
specificSm := wordify(specificType, false)
|
||||
result := strings.Replace(lit, typeTemplate, specificLg, -1)
|
||||
if strings.HasPrefix(result, specificLg) && !isExported(lit) {
|
||||
return strings.Replace(result, specificLg, specificSm, 1)
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
func subTypeIntoComment(line, typeTemplate, specificType string) string {
|
||||
var subbed string
|
||||
for _, w := range strings.Fields(line) {
|
||||
subbed = subbed + subIntoLiteral(w, typeTemplate, specificType) + " "
|
||||
}
|
||||
return subbed
|
||||
}
|
||||
|
||||
// Does the heavy lifting of taking a line of our code and
|
||||
// sbustituting a type into there for our generic type
|
||||
func subTypeIntoLine(line, typeTemplate, specificType string) string {
|
||||
src := []byte(line)
|
||||
var s scanner.Scanner
|
||||
fset := token.NewFileSet()
|
||||
file := fset.AddFile("", fset.Base(), len(src))
|
||||
s.Init(file, src, nil, scanner.ScanComments)
|
||||
output := ""
|
||||
for {
|
||||
_, tok, lit := s.Scan()
|
||||
if tok == token.EOF {
|
||||
break
|
||||
} else if tok == token.COMMENT {
|
||||
subbed := subTypeIntoComment(lit, typeTemplate, specificType)
|
||||
output = output + subbed + " "
|
||||
} else if tok.IsLiteral() {
|
||||
subbed := subIntoLiteral(lit, typeTemplate, specificType)
|
||||
output = output + subbed + " "
|
||||
} else {
|
||||
output = output + tok.String() + " "
|
||||
}
|
||||
}
|
||||
return output
|
||||
}
|
||||
|
||||
// typeSet looks like "KeyType: int, ValueType: string"
|
||||
func generateSpecific(filename string, in io.ReadSeeker, typeSet map[string]string) ([]byte, error) {
|
||||
|
||||
// ensure we are at the beginning of the file
|
||||
in.Seek(0, os.SEEK_SET)
|
||||
|
||||
// parse the source file
|
||||
fs := token.NewFileSet()
|
||||
file, err := parser.ParseFile(fs, filename, in, 0)
|
||||
if err != nil {
|
||||
return nil, &errSource{Err: err}
|
||||
}
|
||||
|
||||
// make sure every generic.Type is represented in the types
|
||||
// argument.
|
||||
for _, decl := range file.Decls {
|
||||
switch it := decl.(type) {
|
||||
case *ast.GenDecl:
|
||||
for _, spec := range it.Specs {
|
||||
ts, ok := spec.(*ast.TypeSpec)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
switch tt := ts.Type.(type) {
|
||||
case *ast.SelectorExpr:
|
||||
if name, ok := tt.X.(*ast.Ident); ok {
|
||||
if name.Name == genericPackage {
|
||||
if _, ok := typeSet[ts.Name.Name]; !ok {
|
||||
return nil, &errMissingSpecificType{GenericType: ts.Name.Name}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
in.Seek(0, os.SEEK_SET)
|
||||
|
||||
var buf bytes.Buffer
|
||||
|
||||
comment := ""
|
||||
scanner := bufio.NewScanner(in)
|
||||
for scanner.Scan() {
|
||||
|
||||
line := scanner.Text()
|
||||
|
||||
// does this line contain generic.Type?
|
||||
if strings.Contains(line, genericType) || strings.Contains(line, genericNumber) {
|
||||
comment = ""
|
||||
continue
|
||||
}
|
||||
|
||||
for t, specificType := range typeSet {
|
||||
if strings.Contains(line, t) {
|
||||
newLine := subTypeIntoLine(line, t, specificType)
|
||||
line = newLine
|
||||
}
|
||||
}
|
||||
|
||||
if comment != "" {
|
||||
buf.WriteString(makeLine(comment))
|
||||
comment = ""
|
||||
}
|
||||
|
||||
// is this line a comment?
|
||||
// TODO: should we handle /* */ comments?
|
||||
if strings.HasPrefix(line, "//") {
|
||||
// record this line to print later
|
||||
comment = line
|
||||
continue
|
||||
}
|
||||
|
||||
// write the line
|
||||
buf.WriteString(makeLine(line))
|
||||
}
|
||||
|
||||
// write it out
|
||||
return buf.Bytes(), nil
|
||||
}
|
||||
|
||||
// Generics parses the source file and generates the bytes replacing the
|
||||
// generic types for the keys map with the specific types (its value).
|
||||
func Generics(filename, pkgName string, in io.ReadSeeker, typeSets []map[string]string) ([]byte, error) {
|
||||
|
||||
totalOutput := header
|
||||
|
||||
for _, typeSet := range typeSets {
|
||||
|
||||
// generate the specifics
|
||||
parsed, err := generateSpecific(filename, in, typeSet)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
totalOutput = append(totalOutput, parsed...)
|
||||
|
||||
}
|
||||
|
||||
// clean up the code line by line
|
||||
packageFound := false
|
||||
insideImportBlock := false
|
||||
var cleanOutputLines []string
|
||||
scanner := bufio.NewScanner(bytes.NewReader(totalOutput))
|
||||
for scanner.Scan() {
|
||||
|
||||
// end of imports block?
|
||||
if insideImportBlock {
|
||||
if bytes.HasSuffix(scanner.Bytes(), closeBrace) {
|
||||
insideImportBlock = false
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
if bytes.HasPrefix(scanner.Bytes(), packageKeyword) {
|
||||
if packageFound {
|
||||
continue
|
||||
} else {
|
||||
packageFound = true
|
||||
}
|
||||
} else if bytes.HasPrefix(scanner.Bytes(), importKeyword) {
|
||||
if bytes.HasSuffix(scanner.Bytes(), openBrace) {
|
||||
insideImportBlock = true
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
// check all unwantedLinePrefixes - and skip them
|
||||
skipline := false
|
||||
for _, prefix := range unwantedLinePrefixes {
|
||||
if bytes.HasPrefix(scanner.Bytes(), prefix) {
|
||||
skipline = true
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if skipline {
|
||||
continue
|
||||
}
|
||||
|
||||
cleanOutputLines = append(cleanOutputLines, makeLine(scanner.Text()))
|
||||
}
|
||||
|
||||
cleanOutput := strings.Join(cleanOutputLines, "")
|
||||
|
||||
output := []byte(cleanOutput)
|
||||
var err error
|
||||
|
||||
// change package name
|
||||
if pkgName != "" {
|
||||
output = changePackage(bytes.NewReader([]byte(output)), pkgName)
|
||||
}
|
||||
// fix the imports
|
||||
output, err = imports.Process(filename, output, nil)
|
||||
if err != nil {
|
||||
return nil, &errImports{Err: err}
|
||||
}
|
||||
|
||||
return output, nil
|
||||
}
|
||||
|
||||
func makeLine(s string) string {
|
||||
return fmt.Sprintln(strings.TrimRight(s, linefeed))
|
||||
}
|
||||
|
||||
// isAlphaNumeric gets whether the rune is alphanumeric or _.
|
||||
func isAlphaNumeric(r rune) bool {
|
||||
return r == '_' || unicode.IsLetter(r) || unicode.IsDigit(r)
|
||||
}
|
||||
|
||||
// wordify turns a type into a nice word for function and type
|
||||
// names etc.
|
||||
func wordify(s string, exported bool) string {
|
||||
s = strings.TrimRight(s, "{}")
|
||||
s = strings.TrimLeft(s, "*&")
|
||||
s = strings.Replace(s, ".", "", -1)
|
||||
if !exported {
|
||||
return s
|
||||
}
|
||||
return strings.ToUpper(string(s[0])) + s[1:]
|
||||
}
|
||||
|
||||
func changePackage(r io.Reader, pkgName string) []byte {
|
||||
var out bytes.Buffer
|
||||
sc := bufio.NewScanner(r)
|
||||
done := false
|
||||
|
||||
for sc.Scan() {
|
||||
s := sc.Text()
|
||||
|
||||
if !done && strings.HasPrefix(s, "package") {
|
||||
parts := strings.Split(s, " ")
|
||||
parts[1] = pkgName
|
||||
s = strings.Join(parts, " ")
|
||||
done = true
|
||||
}
|
||||
|
||||
fmt.Fprintln(&out, s)
|
||||
}
|
||||
return out.Bytes()
|
||||
}
|
||||
|
||||
func isExported(lit string) bool {
|
||||
if len(lit) == 0 {
|
||||
return false
|
||||
}
|
||||
return unicode.IsUpper(rune(lit[0]))
|
||||
}
|
|
@ -1,89 +0,0 @@
|
|||
package parse
|
||||
|
||||
import "strings"
|
||||
|
||||
const (
|
||||
typeSep = " "
|
||||
keyValueSep = "="
|
||||
valuesSep = ","
|
||||
builtins = "BUILTINS"
|
||||
numbers = "NUMBERS"
|
||||
)
|
||||
|
||||
// TypeSet turns a type string into a []map[string]string
|
||||
// that can be given to parse.Generics for it to do its magic.
|
||||
//
|
||||
// Acceptable args are:
|
||||
//
|
||||
// Person=man
|
||||
// Person=man Animal=dog
|
||||
// Person=man Animal=dog Animal2=cat
|
||||
// Person=man,woman Animal=dog,cat
|
||||
// Person=man,woman,child Animal=dog,cat Place=london,paris
|
||||
func TypeSet(arg string) ([]map[string]string, error) {
|
||||
|
||||
types := make(map[string][]string)
|
||||
var keys []string
|
||||
for _, pair := range strings.Split(arg, typeSep) {
|
||||
segs := strings.Split(pair, keyValueSep)
|
||||
if len(segs) != 2 {
|
||||
return nil, &errBadTypeArgs{Arg: arg, Message: "Generic=Specific expected"}
|
||||
}
|
||||
key := segs[0]
|
||||
keys = append(keys, key)
|
||||
types[key] = make([]string, 0)
|
||||
for _, t := range strings.Split(segs[1], valuesSep) {
|
||||
if t == builtins {
|
||||
types[key] = append(types[key], Builtins...)
|
||||
} else if t == numbers {
|
||||
types[key] = append(types[key], Numbers...)
|
||||
} else {
|
||||
types[key] = append(types[key], t)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
cursors := make(map[string]int)
|
||||
for _, key := range keys {
|
||||
cursors[key] = 0
|
||||
}
|
||||
|
||||
outChan := make(chan map[string]string)
|
||||
go func() {
|
||||
buildTypeSet(keys, 0, cursors, types, outChan)
|
||||
close(outChan)
|
||||
}()
|
||||
|
||||
var typeSets []map[string]string
|
||||
for typeSet := range outChan {
|
||||
typeSets = append(typeSets, typeSet)
|
||||
}
|
||||
|
||||
return typeSets, nil
|
||||
|
||||
}
|
||||
|
||||
func buildTypeSet(keys []string, keyI int, cursors map[string]int, types map[string][]string, out chan<- map[string]string) {
|
||||
key := keys[keyI]
|
||||
for cursors[key] < len(types[key]) {
|
||||
if keyI < len(keys)-1 {
|
||||
buildTypeSet(keys, keyI+1, copycursors(cursors), types, out)
|
||||
} else {
|
||||
// build the typeset for this combination
|
||||
ts := make(map[string]string)
|
||||
for k, vals := range types {
|
||||
ts[k] = vals[cursors[k]]
|
||||
}
|
||||
out <- ts
|
||||
}
|
||||
cursors[key]++
|
||||
}
|
||||
}
|
||||
|
||||
func copycursors(source map[string]int) map[string]int {
|
||||
copy := make(map[string]int)
|
||||
for k, v := range source {
|
||||
copy[k] = v
|
||||
}
|
||||
return copy
|
||||
}
|
|
@ -0,0 +1,12 @@
|
|||
# This is the official list of GoMock authors for copyright purposes.
|
||||
# This file is distinct from the CONTRIBUTORS files.
|
||||
# See the latter for an explanation.
|
||||
|
||||
# Names should be added to this file as
|
||||
# Name or Organization <email address>
|
||||
# The email address is not required for organizations.
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Alex Reece <awreece@gmail.com>
|
||||
Google Inc.
|
|
@ -0,0 +1,37 @@
|
|||
# This is the official list of people who can contribute (and typically
|
||||
# have contributed) code to the gomock repository.
|
||||
# The AUTHORS file lists the copyright holders; this file
|
||||
# lists people. For example, Google employees are listed here
|
||||
# but not in AUTHORS, because Google holds the copyright.
|
||||
#
|
||||
# The submission process automatically checks to make sure
|
||||
# that people submitting code are listed in this file (by email address).
|
||||
#
|
||||
# Names should be added to this file only after verifying that
|
||||
# the individual or the individual's organization has agreed to
|
||||
# the appropriate Contributor License Agreement, found here:
|
||||
#
|
||||
# http://code.google.com/legal/individual-cla-v1.0.html
|
||||
# http://code.google.com/legal/corporate-cla-v1.0.html
|
||||
#
|
||||
# The agreement for individuals can be filled out on the web.
|
||||
#
|
||||
# When adding J Random Contributor's name to this file,
|
||||
# either J's name or J's organization's name should be
|
||||
# added to the AUTHORS file, depending on whether the
|
||||
# individual or corporate CLA was used.
|
||||
|
||||
# Names should be added to this file like so:
|
||||
# Name <email address>
|
||||
#
|
||||
# An entry with two email addresses specifies that the
|
||||
# first address should be used in the submit logs and
|
||||
# that the second address should be recognized as the
|
||||
# same person when interacting with Rietveld.
|
||||
|
||||
# Please keep the list sorted.
|
||||
|
||||
Aaron Jacobs <jacobsa@google.com> <aaronjjacobs@gmail.com>
|
||||
Alex Reece <awreece@gmail.com>
|
||||
David Symonds <dsymonds@golang.org>
|
||||
Ryan Barrett <ryanb@google.com>
|
|
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"You" (or "Your") shall mean an individual or Legal Entity exercising permissions granted by this License.

"Source" form shall mean the preferred form for making modifications, including but not limited to software source code, documentation source, and configuration files.

"Object" form shall mean any form resulting from mechanical transformation or translation of a Source form, including but not limited to compiled object code, generated documentation, and conversions to other media types.

"Work" shall mean the work of authorship, whether in Source or Object form, made available under the License, as indicated by a copyright notice that is included in or attached to the work (an example is provided in the Appendix below).

"Derivative Works" shall mean any work, whether in Source or Object form, that is based on (or derived from) the Work and for which the editorial revisions, annotations, elaborations, or other modifications represent, as a whole, an original work of authorship. For the purposes of this License, Derivative Works shall not include works that remain separable from, or merely link (or bind by name) to the interfaces of, the Work and Derivative Works thereof.

"Contribution" shall mean any work of authorship, including the original version of the Work and any modifications or additions to that Work or Derivative Works thereof, that is intentionally submitted to Licensor for inclusion in the Work by the copyright owner or by an individual or Legal Entity authorized to submit on behalf of the copyright owner. For the purposes of this definition, "submitted" means any form of electronic, verbal, or written communication sent to the Licensor or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, the Licensor for the purpose of discussing and improving the Work, but excluding communication that is conspicuously marked or otherwise designated in writing by the copyright owner as "Not a Contribution."

"Contributor" shall mean Licensor and any individual or Legal Entity on behalf of whom a Contribution has been received by Licensor and subsequently incorporated within the Work.

2. Grant of Copyright License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable copyright license to reproduce, prepare Derivative Works of, publicly display, publicly perform, sublicense, and distribute the Work and such Derivative Works in Source or Object form.

3. Grant of Patent License. Subject to the terms and conditions of this License, each Contributor hereby grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by such Contributor that are necessarily infringed by their Contribution(s) alone or by combination of their Contribution(s) with the Work to which such Contribution(s) was submitted. If You institute patent litigation against any entity (including a cross-claim or counterclaim in a lawsuit) alleging that the Work or a Contribution incorporated within the Work constitutes direct or contributory patent infringement, then any patent licenses granted to You under this License for that Work shall terminate as of the date such litigation is filed.

4. Redistribution. You may reproduce and distribute copies of the Work or Derivative Works thereof in any medium, with or without modifications, and in Source or Object form, provided that You meet the following conditions:

(a) You must give any other recipients of the Work or Derivative Works a copy of this License; and

(b) You must cause any modified files to carry prominent notices stating that You changed the files; and

(c) You must retain, in the Source form of any Derivative Works that You distribute, all copyright, patent, trademark, and attribution notices from the Source form of the Work, excluding those notices that do not pertain to any part of the Derivative Works; and

(d) If the Work includes a "NOTICE" text file as part of its distribution, then any Derivative Works that You distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the Derivative Works, in at least one of the following places: within a NOTICE text file distributed as part of the Derivative Works; within the Source form or documentation, if provided along with the Derivative Works; or, within a display generated by the Derivative Works, if and wherever such third-party notices normally appear. The contents of the NOTICE file are for informational purposes only and do not modify the License. You may add Your own attribution notices within Derivative Works that You distribute, alongside or as an addendum to the NOTICE text from the Work, provided that such additional attribution notices cannot be construed as modifying the License.

You may add Your own copyright statement to Your modifications and may provide additional or different license terms and conditions for use, reproduction, or distribution of Your modifications, or for any such Derivative Works as a whole, provided Your use, reproduction, and distribution of the Work otherwise complies with the conditions stated in this License.

5. Submission of Contributions. Unless You explicitly state otherwise, any Contribution intentionally submitted for inclusion in the Work by You to the Licensor shall be under the terms and conditions of this License, without any additional terms or conditions. Notwithstanding the above, nothing herein shall supersede or modify the terms of any separate license agreement you may have executed with Licensor regarding such Contributions.

6. Trademarks. This License does not grant permission to use the trade names, trademarks, service marks, or product names of the Licensor, except as required for reasonable and customary use in describing the origin of the Work and reproducing the content of the NOTICE file.

7. Disclaimer of Warranty. Unless required by applicable law or agreed to in writing, Licensor provides the Work (and each Contributor provides its Contributions) on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are solely responsible for determining the appropriateness of using or redistributing the Work and assume any risks associated with Your exercise of permissions under this License.

8. Limitation of Liability. In no event and under no legal theory, whether in tort (including negligence), contract, or otherwise, unless required by applicable law (such as deliberate and grossly negligent acts) or agreed to in writing, shall any Contributor be liable to You for damages, including any direct, indirect, special, incidental, or consequential damages of any character arising as a result of this License or out of the use or inability to use the Work (including but not limited to damages for loss of goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses), even if such Contributor has been advised of the possibility of such damages.

9. Accepting Warranty or Additional Liability. While redistributing the Work or Derivative Works thereof, You may choose to offer, and charge a fee for, acceptance of support, warranty, indemnity, or other liability obligations and/or rights consistent with this License. However, in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability.

END OF TERMS AND CONDITIONS

APPENDIX: How to apply the Apache License to your work.

To apply the Apache License to your work, attach the following boilerplate notice, with the fields enclosed by brackets "[]" replaced with your own identifying information. (Don't include the brackets!) The text should be enclosed in the appropriate comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier identification within third-party archives.

Copyright [yyyy] [name of copyright owner]

Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.
@ -0,0 +1,701 @@
// Copyright 2010 Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// MockGen generates mock implementations of Go interfaces.
|
||||
package main
|
||||
|
||||
// TODO: This does not support recursive embedded interfaces.
|
||||
// TODO: This does not support embedding package-local interfaces in a separate file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/token"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"unicode"
|
||||
|
||||
"github.com/golang/mock/mockgen/model"
|
||||
|
||||
"golang.org/x/mod/modfile"
|
||||
toolsimports "golang.org/x/tools/imports"
|
||||
)
|
||||
|
||||
const (
	gomockImportPath = "github.com/golang/mock/gomock"
)

var (
	version = ""
	commit  = "none"
	date    = "unknown"
)

var (
	source          = flag.String("source", "", "(source mode) Input Go source file; enables source mode.")
	destination     = flag.String("destination", "", "Output file; defaults to stdout.")
	mockNames       = flag.String("mock_names", "", "Comma-separated interfaceName=mockName pairs of explicit mock names to use. Mock names default to 'Mock'+ interfaceName suffix.")
	packageOut      = flag.String("package", "", "Package of the generated code; defaults to the package of the input with a 'mock_' prefix.")
	selfPackage     = flag.String("self_package", "", "The full package import path for the generated code. The purpose of this flag is to prevent import cycles in the generated code by trying to include its own package. This can happen if the mock's package is set to one of its inputs (usually the main one) and the output is stdio so mockgen cannot detect the final output package. Setting this flag will then tell mockgen which import to exclude.")
	writePkgComment = flag.Bool("write_package_comment", true, "Writes package documentation comment (godoc) if true.")
	copyrightFile   = flag.String("copyright_file", "", "Copyright file used to add copyright header")

	debugParser = flag.Bool("debug_parser", false, "Print out parser results only.")
	showVersion = flag.Bool("version", false, "Print version.")
)
|
||||
|
||||
func main() {
|
||||
flag.Usage = usage
|
||||
flag.Parse()
|
||||
|
||||
if *showVersion {
|
||||
printVersion()
|
||||
return
|
||||
}
|
||||
|
||||
var pkg *model.Package
|
||||
var err error
|
||||
var packageName string
|
||||
if *source != "" {
|
||||
pkg, err = sourceMode(*source)
|
||||
} else {
|
||||
if flag.NArg() != 2 {
|
||||
usage()
|
||||
log.Fatal("Expected exactly two arguments")
|
||||
}
|
||||
packageName = flag.Arg(0)
|
||||
interfaces := strings.Split(flag.Arg(1), ",")
|
||||
if packageName == "." {
|
||||
dir, err := os.Getwd()
|
||||
if err != nil {
|
||||
log.Fatalf("Get current directory failed: %v", err)
|
||||
}
|
||||
packageName, err = packageNameOfDir(dir)
|
||||
if err != nil {
|
||||
log.Fatalf("Parse package name failed: %v", err)
|
||||
}
|
||||
}
|
||||
pkg, err = reflectMode(packageName, interfaces)
|
||||
}
|
||||
if err != nil {
|
||||
log.Fatalf("Loading input failed: %v", err)
|
||||
}
|
||||
|
||||
if *debugParser {
|
||||
pkg.Print(os.Stdout)
|
||||
return
|
||||
}
|
||||
|
||||
dst := os.Stdout
|
||||
if len(*destination) > 0 {
|
||||
if err := os.MkdirAll(filepath.Dir(*destination), os.ModePerm); err != nil {
|
||||
log.Fatalf("Unable to create directory: %v", err)
|
||||
}
|
||||
f, err := os.Create(*destination)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed opening destination file: %v", err)
|
||||
}
|
||||
defer f.Close()
|
||||
dst = f
|
||||
}
|
||||
|
||||
outputPackageName := *packageOut
|
||||
if outputPackageName == "" {
|
||||
// pkg.Name in reflect mode is the base name of the import path,
|
||||
// which might have characters that are illegal to have in package names.
|
||||
outputPackageName = "mock_" + sanitize(pkg.Name)
|
||||
}
|
||||
|
||||
// outputPackagePath represents the fully qualified name of the package of
|
||||
// the generated code. Its purposes are to prevent the module from importing
|
||||
// itself and to prevent qualifying type names that come from its own
|
||||
// package (i.e. if there is a type called X then we want to print "X" not
|
||||
// "package.X" since "package" is this package). This can happen if the mock
|
||||
// is output into an already existing package.
|
||||
outputPackagePath := *selfPackage
|
||||
if outputPackagePath == "" && *destination != "" {
|
||||
dstPath, err := filepath.Abs(filepath.Dir(*destination))
|
||||
if err == nil {
|
||||
pkgPath, err := parsePackageImport(dstPath)
|
||||
if err == nil {
|
||||
outputPackagePath = pkgPath
|
||||
} else {
|
||||
log.Println("Unable to infer -self_package from destination file path:", err)
|
||||
}
|
||||
} else {
|
||||
log.Println("Unable to determine destination file path:", err)
|
||||
}
|
||||
}
|
||||
|
||||
g := new(generator)
|
||||
if *source != "" {
|
||||
g.filename = *source
|
||||
} else {
|
||||
g.srcPackage = packageName
|
||||
g.srcInterfaces = flag.Arg(1)
|
||||
}
|
||||
g.destination = *destination
|
||||
|
||||
if *mockNames != "" {
|
||||
g.mockNames = parseMockNames(*mockNames)
|
||||
}
|
||||
if *copyrightFile != "" {
|
||||
header, err := ioutil.ReadFile(*copyrightFile)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed reading copyright file: %v", err)
|
||||
}
|
||||
|
||||
g.copyrightHeader = string(header)
|
||||
}
|
||||
if err := g.Generate(pkg, outputPackageName, outputPackagePath); err != nil {
|
||||
log.Fatalf("Failed generating mock: %v", err)
|
||||
}
|
||||
if _, err := dst.Write(g.Output()); err != nil {
|
||||
log.Fatalf("Failed writing to destination: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func parseMockNames(names string) map[string]string {
|
||||
mocksMap := make(map[string]string)
|
||||
for _, kv := range strings.Split(names, ",") {
|
||||
parts := strings.SplitN(kv, "=", 2)
|
||||
if len(parts) != 2 || parts[1] == "" {
|
||||
log.Fatalf("bad mock names spec: %v", kv)
|
||||
}
|
||||
mocksMap[parts[0]] = parts[1]
|
||||
}
|
||||
return mocksMap
|
||||
}
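For illustration (the interface and mock names below are made up), a -mock_names value parses like this:

	names := parseMockNames("Store=MockDataStore,Cache=FakeCache")
	// names == map[string]string{"Store": "MockDataStore", "Cache": "FakeCache"}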
|
||||
|
||||
func usage() {
|
||||
_, _ = io.WriteString(os.Stderr, usageText)
|
||||
flag.PrintDefaults()
|
||||
}
|
||||
|
||||
const usageText = `mockgen has two modes of operation: source and reflect.

Source mode generates mock interfaces from a source file.
It is enabled by using the -source flag. Other flags that
may be useful in this mode are -imports and -aux_files.
Example:
	mockgen -source=foo.go [other options]

Reflect mode generates mock interfaces by building a program
that uses reflection to understand interfaces. It is enabled
by passing two non-flag arguments: an import path, and a
comma-separated list of symbols.
Example:
	mockgen database/sql/driver Conn,Driver

`
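As a sketch only, mockgen is commonly wired up through a go:generate directive; the import path and interface name below are hypothetical, while -destination and -package are the flags defined above:

	//go:generate mockgen -destination=mocks/store_mock.go -package=mocks github.com/acme/app/internal Store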
|
||||
|
||||
type generator struct {
|
||||
buf bytes.Buffer
|
||||
indent string
|
||||
mockNames map[string]string // may be empty
|
||||
filename string // may be empty
|
||||
destination string // may be empty
|
||||
srcPackage, srcInterfaces string // may be empty
|
||||
copyrightHeader string
|
||||
|
||||
packageMap map[string]string // map from import path to package name
|
||||
}
|
||||
|
||||
func (g *generator) p(format string, args ...interface{}) {
|
||||
fmt.Fprintf(&g.buf, g.indent+format+"\n", args...)
|
||||
}
|
||||
|
||||
func (g *generator) in() {
|
||||
g.indent += "\t"
|
||||
}
|
||||
|
||||
func (g *generator) out() {
|
||||
if len(g.indent) > 0 {
|
||||
g.indent = g.indent[0 : len(g.indent)-1]
|
||||
}
|
||||
}
|
||||
|
||||
// sanitize cleans up a string to make a suitable package name.
|
||||
func sanitize(s string) string {
|
||||
t := ""
|
||||
for _, r := range s {
|
||||
if t == "" {
|
||||
if unicode.IsLetter(r) || r == '_' {
|
||||
t += string(r)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
if unicode.IsLetter(r) || unicode.IsDigit(r) || r == '_' {
|
||||
t += string(r)
|
||||
continue
|
||||
}
|
||||
}
|
||||
t += "_"
|
||||
}
|
||||
if t == "_" {
|
||||
t = "x"
|
||||
}
|
||||
return t
|
||||
}
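A quick sketch of sanitize's effect on a hypothetical input:

	name := sanitize("my-pkg.v2")
	// name == "my_pkg_v2": the leading letter is kept, '-' and '.' become '_'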
|
||||
|
||||
func (g *generator) Generate(pkg *model.Package, outputPkgName string, outputPackagePath string) error {
|
||||
if outputPkgName != pkg.Name && *selfPackage == "" {
|
||||
// reset outputPackagePath if it's not passed in through -self_package
|
||||
outputPackagePath = ""
|
||||
}
|
||||
|
||||
if g.copyrightHeader != "" {
|
||||
lines := strings.Split(g.copyrightHeader, "\n")
|
||||
for _, line := range lines {
|
||||
g.p("// %s", line)
|
||||
}
|
||||
g.p("")
|
||||
}
|
||||
|
||||
g.p("// Code generated by MockGen. DO NOT EDIT.")
|
||||
if g.filename != "" {
|
||||
g.p("// Source: %v", g.filename)
|
||||
} else {
|
||||
g.p("// Source: %v (interfaces: %v)", g.srcPackage, g.srcInterfaces)
|
||||
}
|
||||
g.p("")
|
||||
|
||||
// Get all required imports, and generate unique names for them all.
|
||||
im := pkg.Imports()
|
||||
im[gomockImportPath] = true
|
||||
|
||||
// Only import reflect if it's used. We only use reflect in mocked methods
|
||||
// so only import if any of the mocked interfaces have methods.
|
||||
for _, intf := range pkg.Interfaces {
|
||||
if len(intf.Methods) > 0 {
|
||||
im["reflect"] = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// Sort keys to make import alias generation predictable
|
||||
sortedPaths := make([]string, len(im))
|
||||
x := 0
|
||||
for pth := range im {
|
||||
sortedPaths[x] = pth
|
||||
x++
|
||||
}
|
||||
sort.Strings(sortedPaths)
|
||||
|
||||
packagesName := createPackageMap(sortedPaths)
|
||||
|
||||
g.packageMap = make(map[string]string, len(im))
|
||||
localNames := make(map[string]bool, len(im))
|
||||
for _, pth := range sortedPaths {
|
||||
base, ok := packagesName[pth]
|
||||
if !ok {
|
||||
base = sanitize(path.Base(pth))
|
||||
}
|
||||
|
||||
// Local names for an imported package can usually be the basename of the import path.
|
||||
// A couple of situations don't permit that, such as duplicate local names
|
||||
// (e.g. importing "html/template" and "text/template"), or where the basename is
|
||||
// a keyword (e.g. "foo/case").
|
||||
// try base0, base1, ...
|
||||
pkgName := base
|
||||
i := 0
|
||||
for localNames[pkgName] || token.Lookup(pkgName).IsKeyword() {
|
||||
pkgName = base + strconv.Itoa(i)
|
||||
i++
|
||||
}
|
||||
|
||||
// Avoid importing package if source pkg == output pkg
|
||||
if pth == pkg.PkgPath && outputPackagePath == pkg.PkgPath {
|
||||
continue
|
||||
}
|
||||
|
||||
g.packageMap[pth] = pkgName
|
||||
localNames[pkgName] = true
|
||||
}
|
||||
|
||||
if *writePkgComment {
|
||||
g.p("// Package %v is a generated GoMock package.", outputPkgName)
|
||||
}
|
||||
g.p("package %v", outputPkgName)
|
||||
g.p("")
|
||||
g.p("import (")
|
||||
g.in()
|
||||
for pkgPath, pkgName := range g.packageMap {
|
||||
if pkgPath == outputPackagePath {
|
||||
continue
|
||||
}
|
||||
g.p("%v %q", pkgName, pkgPath)
|
||||
}
|
||||
for _, pkgPath := range pkg.DotImports {
|
||||
g.p(". %q", pkgPath)
|
||||
}
|
||||
g.out()
|
||||
g.p(")")
|
||||
|
||||
for _, intf := range pkg.Interfaces {
|
||||
if err := g.GenerateMockInterface(intf, outputPackagePath); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// The name of the mock type to use for the given interface identifier.
|
||||
func (g *generator) mockName(typeName string) string {
|
||||
if mockName, ok := g.mockNames[typeName]; ok {
|
||||
return mockName
|
||||
}
|
||||
|
||||
return "Mock" + typeName
|
||||
}
|
||||
|
||||
func (g *generator) GenerateMockInterface(intf *model.Interface, outputPackagePath string) error {
|
||||
mockType := g.mockName(intf.Name)
|
||||
|
||||
g.p("")
|
||||
g.p("// %v is a mock of %v interface.", mockType, intf.Name)
|
||||
g.p("type %v struct {", mockType)
|
||||
g.in()
|
||||
g.p("ctrl *gomock.Controller")
|
||||
g.p("recorder *%vMockRecorder", mockType)
|
||||
g.out()
|
||||
g.p("}")
|
||||
g.p("")
|
||||
|
||||
g.p("// %vMockRecorder is the mock recorder for %v.", mockType, mockType)
|
||||
g.p("type %vMockRecorder struct {", mockType)
|
||||
g.in()
|
||||
g.p("mock *%v", mockType)
|
||||
g.out()
|
||||
g.p("}")
|
||||
g.p("")
|
||||
|
||||
g.p("// New%v creates a new mock instance.", mockType)
|
||||
g.p("func New%v(ctrl *gomock.Controller) *%v {", mockType, mockType)
|
||||
g.in()
|
||||
g.p("mock := &%v{ctrl: ctrl}", mockType)
|
||||
g.p("mock.recorder = &%vMockRecorder{mock}", mockType)
|
||||
g.p("return mock")
|
||||
g.out()
|
||||
g.p("}")
|
||||
g.p("")
|
||||
|
||||
// XXX: possible name collision here if someone has EXPECT in their interface.
|
||||
g.p("// EXPECT returns an object that allows the caller to indicate expected use.")
|
||||
g.p("func (m *%v) EXPECT() *%vMockRecorder {", mockType, mockType)
|
||||
g.in()
|
||||
g.p("return m.recorder")
|
||||
g.out()
|
||||
g.p("}")
|
||||
|
||||
g.GenerateMockMethods(mockType, intf, outputPackagePath)
|
||||
|
||||
return nil
|
||||
}
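For a hypothetical interface named Store, the skeleton emitted by GenerateMockInterface comes out roughly as follows (generated comments omitted; this is a sketch, not verbatim output):

	type MockStore struct {
		ctrl     *gomock.Controller
		recorder *MockStoreMockRecorder
	}

	type MockStoreMockRecorder struct {
		mock *MockStore
	}

	func NewMockStore(ctrl *gomock.Controller) *MockStore {
		mock := &MockStore{ctrl: ctrl}
		mock.recorder = &MockStoreMockRecorder{mock}
		return mock
	}

	func (m *MockStore) EXPECT() *MockStoreMockRecorder {
		return m.recorder
	}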
|
||||
|
||||
type byMethodName []*model.Method
|
||||
|
||||
func (b byMethodName) Len() int { return len(b) }
|
||||
func (b byMethodName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
|
||||
func (b byMethodName) Less(i, j int) bool { return b[i].Name < b[j].Name }
|
||||
|
||||
func (g *generator) GenerateMockMethods(mockType string, intf *model.Interface, pkgOverride string) {
|
||||
sort.Sort(byMethodName(intf.Methods))
|
||||
for _, m := range intf.Methods {
|
||||
g.p("")
|
||||
_ = g.GenerateMockMethod(mockType, m, pkgOverride)
|
||||
g.p("")
|
||||
_ = g.GenerateMockRecorderMethod(mockType, m)
|
||||
}
|
||||
}
|
||||
|
||||
func makeArgString(argNames, argTypes []string) string {
|
||||
args := make([]string, len(argNames))
|
||||
for i, name := range argNames {
|
||||
// specify the type only once for consecutive args of the same type
|
||||
if i+1 < len(argTypes) && argTypes[i] == argTypes[i+1] {
|
||||
args[i] = name
|
||||
} else {
|
||||
args[i] = name + " " + argTypes[i]
|
||||
}
|
||||
}
|
||||
return strings.Join(args, ", ")
|
||||
}
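For example, with hypothetical argument names and types, consecutive parameters of the same type share one type annotation:

	s := makeArgString([]string{"ctx", "id", "name"}, []string{"context.Context", "string", "string"})
	// s == "ctx context.Context, id, name string"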
|
||||
|
||||
// GenerateMockMethod generates a mock method implementation.
|
||||
// If non-empty, pkgOverride is the package in which unqualified types reside.
|
||||
func (g *generator) GenerateMockMethod(mockType string, m *model.Method, pkgOverride string) error {
|
||||
argNames := g.getArgNames(m)
|
||||
argTypes := g.getArgTypes(m, pkgOverride)
|
||||
argString := makeArgString(argNames, argTypes)
|
||||
|
||||
rets := make([]string, len(m.Out))
|
||||
for i, p := range m.Out {
|
||||
rets[i] = p.Type.String(g.packageMap, pkgOverride)
|
||||
}
|
||||
retString := strings.Join(rets, ", ")
|
||||
if len(rets) > 1 {
|
||||
retString = "(" + retString + ")"
|
||||
}
|
||||
if retString != "" {
|
||||
retString = " " + retString
|
||||
}
|
||||
|
||||
ia := newIdentifierAllocator(argNames)
|
||||
idRecv := ia.allocateIdentifier("m")
|
||||
|
||||
g.p("// %v mocks base method.", m.Name)
|
||||
g.p("func (%v *%v) %v(%v)%v {", idRecv, mockType, m.Name, argString, retString)
|
||||
g.in()
|
||||
g.p("%s.ctrl.T.Helper()", idRecv)
|
||||
|
||||
var callArgs string
|
||||
if m.Variadic == nil {
|
||||
if len(argNames) > 0 {
|
||||
callArgs = ", " + strings.Join(argNames, ", ")
|
||||
}
|
||||
} else {
|
||||
// Non-trivial. The generated code must build a []interface{},
|
||||
// but the variadic argument may be any type.
|
||||
idVarArgs := ia.allocateIdentifier("varargs")
|
||||
idVArg := ia.allocateIdentifier("a")
|
||||
g.p("%s := []interface{}{%s}", idVarArgs, strings.Join(argNames[:len(argNames)-1], ", "))
|
||||
g.p("for _, %s := range %s {", idVArg, argNames[len(argNames)-1])
|
||||
g.in()
|
||||
g.p("%s = append(%s, %s)", idVarArgs, idVarArgs, idVArg)
|
||||
g.out()
|
||||
g.p("}")
|
||||
callArgs = ", " + idVarArgs + "..."
|
||||
}
|
||||
if len(m.Out) == 0 {
|
||||
g.p(`%v.ctrl.Call(%v, %q%v)`, idRecv, idRecv, m.Name, callArgs)
|
||||
} else {
|
||||
idRet := ia.allocateIdentifier("ret")
|
||||
g.p(`%v := %v.ctrl.Call(%v, %q%v)`, idRet, idRecv, idRecv, m.Name, callArgs)
|
||||
|
||||
// Go does not allow "naked" type assertions on nil values, so we use the two-value form here.
|
||||
// The value of that is either (x.(T), true) or (Z, false), where Z is the zero value for T.
|
||||
// Happily, this coincides with the semantics we want here.
|
||||
retNames := make([]string, len(rets))
|
||||
for i, t := range rets {
|
||||
retNames[i] = ia.allocateIdentifier(fmt.Sprintf("ret%d", i))
|
||||
g.p("%s, _ := %s[%d].(%s)", retNames[i], idRet, i, t)
|
||||
}
|
||||
g.p("return " + strings.Join(retNames, ", "))
|
||||
}
|
||||
|
||||
g.out()
|
||||
g.p("}")
|
||||
return nil
|
||||
}
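Continuing the hypothetical Store example, a method Get(ctx context.Context, id string) (string, error) would be mocked roughly as:

	func (m *MockStore) Get(ctx context.Context, id string) (string, error) {
		m.ctrl.T.Helper()
		ret := m.ctrl.Call(m, "Get", ctx, id)
		ret0, _ := ret[0].(string)
		ret1, _ := ret[1].(error)
		return ret0, ret1
	}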
|
||||
|
||||
func (g *generator) GenerateMockRecorderMethod(mockType string, m *model.Method) error {
|
||||
argNames := g.getArgNames(m)
|
||||
|
||||
var argString string
|
||||
if m.Variadic == nil {
|
||||
argString = strings.Join(argNames, ", ")
|
||||
} else {
|
||||
argString = strings.Join(argNames[:len(argNames)-1], ", ")
|
||||
}
|
||||
if argString != "" {
|
||||
argString += " interface{}"
|
||||
}
|
||||
|
||||
if m.Variadic != nil {
|
||||
if argString != "" {
|
||||
argString += ", "
|
||||
}
|
||||
argString += fmt.Sprintf("%s ...interface{}", argNames[len(argNames)-1])
|
||||
}
|
||||
|
||||
ia := newIdentifierAllocator(argNames)
|
||||
idRecv := ia.allocateIdentifier("mr")
|
||||
|
||||
g.p("// %v indicates an expected call of %v.", m.Name, m.Name)
|
||||
g.p("func (%s *%vMockRecorder) %v(%v) *gomock.Call {", idRecv, mockType, m.Name, argString)
|
||||
g.in()
|
||||
g.p("%s.mock.ctrl.T.Helper()", idRecv)
|
||||
|
||||
var callArgs string
|
||||
if m.Variadic == nil {
|
||||
if len(argNames) > 0 {
|
||||
callArgs = ", " + strings.Join(argNames, ", ")
|
||||
}
|
||||
} else {
|
||||
if len(argNames) == 1 {
|
||||
// Easy: just use ... to push the arguments through.
|
||||
callArgs = ", " + argNames[0] + "..."
|
||||
} else {
|
||||
// Hard: create a temporary slice.
|
||||
idVarArgs := ia.allocateIdentifier("varargs")
|
||||
g.p("%s := append([]interface{}{%s}, %s...)",
|
||||
idVarArgs,
|
||||
strings.Join(argNames[:len(argNames)-1], ", "),
|
||||
argNames[len(argNames)-1])
|
||||
callArgs = ", " + idVarArgs + "..."
|
||||
}
|
||||
}
|
||||
g.p(`return %s.mock.ctrl.RecordCallWithMethodType(%s.mock, "%s", reflect.TypeOf((*%s)(nil).%s)%s)`, idRecv, idRecv, m.Name, mockType, m.Name, callArgs)
|
||||
|
||||
g.out()
|
||||
g.p("}")
|
||||
return nil
|
||||
}
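And the matching recorder method for the same hypothetical Get would look roughly like:

	func (mr *MockStoreMockRecorder) Get(ctx, id interface{}) *gomock.Call {
		mr.mock.ctrl.T.Helper()
		return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Get", reflect.TypeOf((*MockStore)(nil).Get), ctx, id)
	}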
|
||||
|
||||
func (g *generator) getArgNames(m *model.Method) []string {
|
||||
argNames := make([]string, len(m.In))
|
||||
for i, p := range m.In {
|
||||
name := p.Name
|
||||
if name == "" || name == "_" {
|
||||
name = fmt.Sprintf("arg%d", i)
|
||||
}
|
||||
argNames[i] = name
|
||||
}
|
||||
if m.Variadic != nil {
|
||||
name := m.Variadic.Name
|
||||
if name == "" {
|
||||
name = fmt.Sprintf("arg%d", len(m.In))
|
||||
}
|
||||
argNames = append(argNames, name)
|
||||
}
|
||||
return argNames
|
||||
}
|
||||
|
||||
func (g *generator) getArgTypes(m *model.Method, pkgOverride string) []string {
|
||||
argTypes := make([]string, len(m.In))
|
||||
for i, p := range m.In {
|
||||
argTypes[i] = p.Type.String(g.packageMap, pkgOverride)
|
||||
}
|
||||
if m.Variadic != nil {
|
||||
argTypes = append(argTypes, "..."+m.Variadic.Type.String(g.packageMap, pkgOverride))
|
||||
}
|
||||
return argTypes
|
||||
}
|
||||
|
||||
type identifierAllocator map[string]struct{}
|
||||
|
||||
func newIdentifierAllocator(taken []string) identifierAllocator {
|
||||
a := make(identifierAllocator, len(taken))
|
||||
for _, s := range taken {
|
||||
a[s] = struct{}{}
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
func (o identifierAllocator) allocateIdentifier(want string) string {
|
||||
id := want
|
||||
for i := 2; ; i++ {
|
||||
if _, ok := o[id]; !ok {
|
||||
o[id] = struct{}{}
|
||||
return id
|
||||
}
|
||||
id = want + "_" + strconv.Itoa(i)
|
||||
}
|
||||
}
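A small sketch of the collision handling, with made-up identifiers:

	ia := newIdentifierAllocator([]string{"ctx", "ret"})
	ia.allocateIdentifier("m")   // "m" is free, so it is returned unchanged
	ia.allocateIdentifier("ret") // "ret" is taken, so "ret_2" is returned instead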
|
||||
|
||||
// Output returns the generator's output, formatted in the standard Go style.
|
||||
func (g *generator) Output() []byte {
|
||||
src, err := toolsimports.Process(g.destination, g.buf.Bytes(), nil)
|
||||
if err != nil {
|
||||
log.Fatalf("Failed to format generated source code: %s\n%s", err, g.buf.String())
|
||||
}
|
||||
return src
|
||||
}
|
||||
|
||||
// createPackageMap returns a map of import path to package name
|
||||
// for specified importPaths.
|
||||
func createPackageMap(importPaths []string) map[string]string {
|
||||
var pkg struct {
|
||||
Name string
|
||||
ImportPath string
|
||||
}
|
||||
pkgMap := make(map[string]string)
|
||||
b := bytes.NewBuffer(nil)
|
||||
args := []string{"list", "-json"}
|
||||
args = append(args, importPaths...)
|
||||
cmd := exec.Command("go", args...)
|
||||
cmd.Stdout = b
|
||||
cmd.Run()
|
||||
dec := json.NewDecoder(b)
|
||||
for dec.More() {
|
||||
err := dec.Decode(&pkg)
|
||||
if err != nil {
|
||||
log.Printf("failed to decode 'go list' output: %v", err)
|
||||
continue
|
||||
}
|
||||
pkgMap[pkg.ImportPath] = pkg.Name
|
||||
}
|
||||
return pkgMap
|
||||
}
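Assuming the paths resolve in the current build environment, a call such as the following maps import paths to package names via "go list -json" (the exact result depends on the module setup):

	pkgNames := createPackageMap([]string{"github.com/golang/mock/gomock", "net/http"})
	// pkgNames["github.com/golang/mock/gomock"] == "gomock", pkgNames["net/http"] == "http"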
|
||||
|
||||
func printVersion() {
|
||||
if version != "" {
|
||||
fmt.Printf("v%s\nCommit: %s\nDate: %s\n", version, commit, date)
|
||||
} else {
|
||||
printModuleVersion()
|
||||
}
|
||||
}
|
||||
|
||||
// parsePackageImport returns the package import path for the package in srcDir.
// An alternative implementation would be:
//	cfg := &packages.Config{Mode: packages.NeedName, Tests: true, Dir: srcDir}
//	pkgs, err := packages.Load(cfg, "file="+source)
// However, that approach shells out to "go list" and slows generation down.
|
||||
func parsePackageImport(srcDir string) (string, error) {
|
||||
moduleMode := os.Getenv("GO111MODULE")
|
||||
// trying to find the module
|
||||
if moduleMode != "off" {
|
||||
currentDir := srcDir
|
||||
for {
|
||||
dat, err := ioutil.ReadFile(filepath.Join(currentDir, "go.mod"))
|
||||
if os.IsNotExist(err) {
|
||||
if currentDir == filepath.Dir(currentDir) {
|
||||
// at the root
|
||||
break
|
||||
}
|
||||
currentDir = filepath.Dir(currentDir)
|
||||
continue
|
||||
} else if err != nil {
|
||||
return "", err
|
||||
}
|
||||
modulePath := modfile.ModulePath(dat)
|
||||
return filepath.ToSlash(filepath.Join(modulePath, strings.TrimPrefix(srcDir, currentDir))), nil
|
||||
}
|
||||
}
|
||||
// fall back to GOPATH mode
|
||||
goPaths := os.Getenv("GOPATH")
|
||||
if goPaths == "" {
|
||||
return "", fmt.Errorf("GOPATH is not set")
|
||||
}
|
||||
goPathList := strings.Split(goPaths, string(os.PathListSeparator))
|
||||
for _, goPath := range goPathList {
|
||||
sourceRoot := filepath.Join(goPath, "src") + string(os.PathSeparator)
|
||||
if strings.HasPrefix(srcDir, sourceRoot) {
|
||||
return filepath.ToSlash(strings.TrimPrefix(srcDir, sourceRoot)), nil
|
||||
}
|
||||
}
|
||||
return "", errOutsideGoPath
|
||||
}
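As a sketch with hypothetical paths, assuming a go.mod declaring module github.com/acme/app lives in /home/dev/app and GO111MODULE is not "off":

	importPath, err := parsePackageImport("/home/dev/app/internal/store")
	// importPath == "github.com/acme/app/internal/store", err == nil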
@ -0,0 +1,495 @@
// Copyright 2012 Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package model contains the data model necessary for generating mock implementations.
|
||||
package model
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// pkgPath is the importable path for package model
|
||||
const pkgPath = "github.com/golang/mock/mockgen/model"
|
||||
|
||||
// Package is a Go package. It may be a subset.
|
||||
type Package struct {
|
||||
Name string
|
||||
PkgPath string
|
||||
Interfaces []*Interface
|
||||
DotImports []string
|
||||
}
|
||||
|
||||
// Print writes the package name and its exported interfaces.
|
||||
func (pkg *Package) Print(w io.Writer) {
|
||||
_, _ = fmt.Fprintf(w, "package %s\n", pkg.Name)
|
||||
for _, intf := range pkg.Interfaces {
|
||||
intf.Print(w)
|
||||
}
|
||||
}
|
||||
|
||||
// Imports returns the imports needed by the Package as a set of import paths.
|
||||
func (pkg *Package) Imports() map[string]bool {
|
||||
im := make(map[string]bool)
|
||||
for _, intf := range pkg.Interfaces {
|
||||
intf.addImports(im)
|
||||
}
|
||||
return im
|
||||
}
|
||||
|
||||
// Interface is a Go interface.
|
||||
type Interface struct {
|
||||
Name string
|
||||
Methods []*Method
|
||||
}
|
||||
|
||||
// Print writes the interface name and its methods.
|
||||
func (intf *Interface) Print(w io.Writer) {
|
||||
_, _ = fmt.Fprintf(w, "interface %s\n", intf.Name)
|
||||
for _, m := range intf.Methods {
|
||||
m.Print(w)
|
||||
}
|
||||
}
|
||||
|
||||
func (intf *Interface) addImports(im map[string]bool) {
|
||||
for _, m := range intf.Methods {
|
||||
m.addImports(im)
|
||||
}
|
||||
}
|
||||
|
||||
// AddMethod adds a new method, de-duplicating by method name.
|
||||
func (intf *Interface) AddMethod(m *Method) {
|
||||
for _, me := range intf.Methods {
|
||||
if me.Name == m.Name {
|
||||
return
|
||||
}
|
||||
}
|
||||
intf.Methods = append(intf.Methods, m)
|
||||
}
|
||||
|
||||
// Method is a single method of an interface.
|
||||
type Method struct {
|
||||
Name string
|
||||
In, Out []*Parameter
|
||||
Variadic *Parameter // may be nil
|
||||
}
|
||||
|
||||
// Print writes the method name and its signature.
|
||||
func (m *Method) Print(w io.Writer) {
|
||||
_, _ = fmt.Fprintf(w, " - method %s\n", m.Name)
|
||||
if len(m.In) > 0 {
|
||||
_, _ = fmt.Fprintf(w, " in:\n")
|
||||
for _, p := range m.In {
|
||||
p.Print(w)
|
||||
}
|
||||
}
|
||||
if m.Variadic != nil {
|
||||
_, _ = fmt.Fprintf(w, " ...:\n")
|
||||
m.Variadic.Print(w)
|
||||
}
|
||||
if len(m.Out) > 0 {
|
||||
_, _ = fmt.Fprintf(w, " out:\n")
|
||||
for _, p := range m.Out {
|
||||
p.Print(w)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Method) addImports(im map[string]bool) {
|
||||
for _, p := range m.In {
|
||||
p.Type.addImports(im)
|
||||
}
|
||||
if m.Variadic != nil {
|
||||
m.Variadic.Type.addImports(im)
|
||||
}
|
||||
for _, p := range m.Out {
|
||||
p.Type.addImports(im)
|
||||
}
|
||||
}
|
||||
|
||||
// Parameter is an argument or return parameter of a method.
|
||||
type Parameter struct {
|
||||
Name string // may be empty
|
||||
Type Type
|
||||
}
|
||||
|
||||
// Print writes a method parameter.
|
||||
func (p *Parameter) Print(w io.Writer) {
|
||||
n := p.Name
|
||||
if n == "" {
|
||||
n = `""`
|
||||
}
|
||||
_, _ = fmt.Fprintf(w, " - %v: %v\n", n, p.Type.String(nil, ""))
|
||||
}
|
||||
|
||||
// Type is a Go type.
|
||||
type Type interface {
|
||||
String(pm map[string]string, pkgOverride string) string
|
||||
addImports(im map[string]bool)
|
||||
}
|
||||
|
||||
func init() {
|
||||
gob.Register(&ArrayType{})
|
||||
gob.Register(&ChanType{})
|
||||
gob.Register(&FuncType{})
|
||||
gob.Register(&MapType{})
|
||||
gob.Register(&NamedType{})
|
||||
gob.Register(&PointerType{})
|
||||
|
||||
// Call gob.RegisterName to make sure it has the consistent name registered
|
||||
// for both gob decoder and encoder.
|
||||
//
|
||||
// For a non-pointer type, gob.Register will try to get package full path by
|
||||
// calling rt.PkgPath() for a name to register. If your project has vendor
|
||||
// directory, it is possible that PkgPath will get a path like this:
|
||||
// ../../../vendor/github.com/golang/mock/mockgen/model
|
||||
gob.RegisterName(pkgPath+".PredeclaredType", PredeclaredType(""))
|
||||
}
|
||||
|
||||
// ArrayType is an array or slice type.
|
||||
type ArrayType struct {
|
||||
Len int // -1 for slices, >= 0 for arrays
|
||||
Type Type
|
||||
}
|
||||
|
||||
func (at *ArrayType) String(pm map[string]string, pkgOverride string) string {
|
||||
s := "[]"
|
||||
if at.Len > -1 {
|
||||
s = fmt.Sprintf("[%d]", at.Len)
|
||||
}
|
||||
return s + at.Type.String(pm, pkgOverride)
|
||||
}
|
||||
|
||||
func (at *ArrayType) addImports(im map[string]bool) { at.Type.addImports(im) }
|
||||
|
||||
// ChanType is a channel type.
|
||||
type ChanType struct {
|
||||
Dir ChanDir // 0, 1 or 2
|
||||
Type Type
|
||||
}
|
||||
|
||||
func (ct *ChanType) String(pm map[string]string, pkgOverride string) string {
|
||||
s := ct.Type.String(pm, pkgOverride)
|
||||
if ct.Dir == RecvDir {
|
||||
return "<-chan " + s
|
||||
}
|
||||
if ct.Dir == SendDir {
|
||||
return "chan<- " + s
|
||||
}
|
||||
return "chan " + s
|
||||
}
|
||||
|
||||
func (ct *ChanType) addImports(im map[string]bool) { ct.Type.addImports(im) }
|
||||
|
||||
// ChanDir is a channel direction.
|
||||
type ChanDir int
|
||||
|
||||
// Constants for channel directions.
|
||||
const (
|
||||
RecvDir ChanDir = 1
|
||||
SendDir ChanDir = 2
|
||||
)
|
||||
|
||||
// FuncType is a function type.
|
||||
type FuncType struct {
|
||||
In, Out []*Parameter
|
||||
Variadic *Parameter // may be nil
|
||||
}
|
||||
|
||||
func (ft *FuncType) String(pm map[string]string, pkgOverride string) string {
|
||||
args := make([]string, len(ft.In))
|
||||
for i, p := range ft.In {
|
||||
args[i] = p.Type.String(pm, pkgOverride)
|
||||
}
|
||||
if ft.Variadic != nil {
|
||||
args = append(args, "..."+ft.Variadic.Type.String(pm, pkgOverride))
|
||||
}
|
||||
rets := make([]string, len(ft.Out))
|
||||
for i, p := range ft.Out {
|
||||
rets[i] = p.Type.String(pm, pkgOverride)
|
||||
}
|
||||
retString := strings.Join(rets, ", ")
|
||||
if nOut := len(ft.Out); nOut == 1 {
|
||||
retString = " " + retString
|
||||
} else if nOut > 1 {
|
||||
retString = " (" + retString + ")"
|
||||
}
|
||||
return "func(" + strings.Join(args, ", ") + ")" + retString
|
||||
}
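A minimal sketch of how a FuncType renders, using made-up parameter types:

	ft := &FuncType{
		In:       []*Parameter{{Type: &NamedType{Package: "context", Type: "Context"}}},
		Variadic: &Parameter{Type: PredeclaredType("string")},
		Out:      []*Parameter{{Type: PredeclaredType("int")}, {Type: PredeclaredType("error")}},
	}
	s := ft.String(map[string]string{"context": "context"}, "")
	// s == "func(context.Context, ...string) (int, error)"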
|
||||
|
||||
func (ft *FuncType) addImports(im map[string]bool) {
|
||||
for _, p := range ft.In {
|
||||
p.Type.addImports(im)
|
||||
}
|
||||
if ft.Variadic != nil {
|
||||
ft.Variadic.Type.addImports(im)
|
||||
}
|
||||
for _, p := range ft.Out {
|
||||
p.Type.addImports(im)
|
||||
}
|
||||
}
|
||||
|
||||
// MapType is a map type.
|
||||
type MapType struct {
|
||||
Key, Value Type
|
||||
}
|
||||
|
||||
func (mt *MapType) String(pm map[string]string, pkgOverride string) string {
|
||||
return "map[" + mt.Key.String(pm, pkgOverride) + "]" + mt.Value.String(pm, pkgOverride)
|
||||
}
|
||||
|
||||
func (mt *MapType) addImports(im map[string]bool) {
|
||||
mt.Key.addImports(im)
|
||||
mt.Value.addImports(im)
|
||||
}
|
||||
|
||||
// NamedType is an exported type in a package.
|
||||
type NamedType struct {
|
||||
Package string // may be empty
|
||||
Type string
|
||||
}
|
||||
|
||||
func (nt *NamedType) String(pm map[string]string, pkgOverride string) string {
|
||||
if pkgOverride == nt.Package {
|
||||
return nt.Type
|
||||
}
|
||||
prefix := pm[nt.Package]
|
||||
if prefix != "" {
|
||||
return prefix + "." + nt.Type
|
||||
}
|
||||
|
||||
return nt.Type
|
||||
}
|
||||
|
||||
func (nt *NamedType) addImports(im map[string]bool) {
|
||||
if nt.Package != "" {
|
||||
im[nt.Package] = true
|
||||
}
|
||||
}
|
||||
|
||||
// PointerType is a pointer to another type.
|
||||
type PointerType struct {
|
||||
Type Type
|
||||
}
|
||||
|
||||
func (pt *PointerType) String(pm map[string]string, pkgOverride string) string {
|
||||
return "*" + pt.Type.String(pm, pkgOverride)
|
||||
}
|
||||
func (pt *PointerType) addImports(im map[string]bool) { pt.Type.addImports(im) }
|
||||
|
||||
// PredeclaredType is a predeclared type such as "int".
|
||||
type PredeclaredType string
|
||||
|
||||
func (pt PredeclaredType) String(map[string]string, string) string { return string(pt) }
|
||||
func (pt PredeclaredType) addImports(map[string]bool) {}
|
||||
|
||||
// The following code is intended to be called by the program generated by ../reflect.go.
|
||||
|
||||
// InterfaceFromInterfaceType returns a pointer to an interface for the
|
||||
// given reflection interface type.
|
||||
func InterfaceFromInterfaceType(it reflect.Type) (*Interface, error) {
|
||||
if it.Kind() != reflect.Interface {
|
||||
return nil, fmt.Errorf("%v is not an interface", it)
|
||||
}
|
||||
intf := &Interface{}
|
||||
|
||||
for i := 0; i < it.NumMethod(); i++ {
|
||||
mt := it.Method(i)
|
||||
// TODO: need to skip unexported methods? or just raise an error?
|
||||
m := &Method{
|
||||
Name: mt.Name,
|
||||
}
|
||||
|
||||
var err error
|
||||
m.In, m.Variadic, m.Out, err = funcArgsFromType(mt.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
intf.AddMethod(m)
|
||||
}
|
||||
|
||||
return intf, nil
|
||||
}
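A usage sketch (assuming the caller imports reflect and io):

	it := reflect.TypeOf((*io.Reader)(nil)).Elem()
	intf, err := InterfaceFromInterfaceType(it)
	// on success, intf holds one Method named "Read" with in ([]byte) and out (int, error);
	// intf.Name is left empty for the caller to fill in.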
|
||||
|
||||
// t's Kind must be a reflect.Func.
|
||||
func funcArgsFromType(t reflect.Type) (in []*Parameter, variadic *Parameter, out []*Parameter, err error) {
|
||||
nin := t.NumIn()
|
||||
if t.IsVariadic() {
|
||||
nin--
|
||||
}
|
||||
var p *Parameter
|
||||
for i := 0; i < nin; i++ {
|
||||
p, err = parameterFromType(t.In(i))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
in = append(in, p)
|
||||
}
|
||||
if t.IsVariadic() {
|
||||
p, err = parameterFromType(t.In(nin).Elem())
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
variadic = p
|
||||
}
|
||||
for i := 0; i < t.NumOut(); i++ {
|
||||
p, err = parameterFromType(t.Out(i))
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
out = append(out, p)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func parameterFromType(t reflect.Type) (*Parameter, error) {
|
||||
tt, err := typeFromType(t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &Parameter{Type: tt}, nil
|
||||
}
|
||||
|
||||
var errorType = reflect.TypeOf((*error)(nil)).Elem()
|
||||
|
||||
var byteType = reflect.TypeOf(byte(0))
|
||||
|
||||
func typeFromType(t reflect.Type) (Type, error) {
|
||||
// Hack workaround for https://golang.org/issue/3853.
|
||||
// This explicit check should not be necessary.
|
||||
if t == byteType {
|
||||
return PredeclaredType("byte"), nil
|
||||
}
|
||||
|
||||
if imp := t.PkgPath(); imp != "" {
|
||||
return &NamedType{
|
||||
Package: impPath(imp),
|
||||
Type: t.Name(),
|
||||
}, nil
|
||||
}
|
||||
|
||||
// only unnamed or predeclared types after here
|
||||
|
||||
// Lots of types have element types. Let's do the parsing and error checking for all of them.
|
||||
var elemType Type
|
||||
switch t.Kind() {
|
||||
case reflect.Array, reflect.Chan, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
var err error
|
||||
elemType, err = typeFromType(t.Elem())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
switch t.Kind() {
|
||||
case reflect.Array:
|
||||
return &ArrayType{
|
||||
Len: t.Len(),
|
||||
Type: elemType,
|
||||
}, nil
|
||||
case reflect.Bool, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr,
|
||||
reflect.Float32, reflect.Float64, reflect.Complex64, reflect.Complex128, reflect.String:
|
||||
return PredeclaredType(t.Kind().String()), nil
|
||||
case reflect.Chan:
|
||||
var dir ChanDir
|
||||
switch t.ChanDir() {
|
||||
case reflect.RecvDir:
|
||||
dir = RecvDir
|
||||
case reflect.SendDir:
|
||||
dir = SendDir
|
||||
}
|
||||
return &ChanType{
|
||||
Dir: dir,
|
||||
Type: elemType,
|
||||
}, nil
|
||||
case reflect.Func:
|
||||
in, variadic, out, err := funcArgsFromType(t)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &FuncType{
|
||||
In: in,
|
||||
Out: out,
|
||||
Variadic: variadic,
|
||||
}, nil
|
||||
case reflect.Interface:
|
||||
// Two special interfaces.
|
||||
if t.NumMethod() == 0 {
|
||||
return PredeclaredType("interface{}"), nil
|
||||
}
|
||||
if t == errorType {
|
||||
return PredeclaredType("error"), nil
|
||||
}
|
||||
case reflect.Map:
|
||||
kt, err := typeFromType(t.Key())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &MapType{
|
||||
Key: kt,
|
||||
Value: elemType,
|
||||
}, nil
|
||||
case reflect.Ptr:
|
||||
return &PointerType{
|
||||
Type: elemType,
|
||||
}, nil
|
||||
case reflect.Slice:
|
||||
return &ArrayType{
|
||||
Len: -1,
|
||||
Type: elemType,
|
||||
}, nil
|
||||
case reflect.Struct:
|
||||
if t.NumField() == 0 {
|
||||
return PredeclaredType("struct{}"), nil
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: Struct, UnsafePointer
|
||||
return nil, fmt.Errorf("can't yet turn %v (%v) into a model.Type", t, t.Kind())
|
||||
}
|
||||
|
||||
// impPath sanitizes the package path returned by `PkgPath` method of a reflect Type so that
|
||||
// it is importable. PkgPath might return a path that includes "vendor". These paths do not
|
||||
// compile, so we need to remove everything up to and including "/vendor/".
|
||||
// See https://github.com/golang/go/issues/12019.
|
||||
func impPath(imp string) string {
|
||||
if strings.HasPrefix(imp, "vendor/") {
|
||||
imp = "/" + imp
|
||||
}
|
||||
if i := strings.LastIndex(imp, "/vendor/"); i != -1 {
|
||||
imp = imp[i+len("/vendor/"):]
|
||||
}
|
||||
return imp
|
||||
}
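For example, with a hypothetical vendored path:

	p := impPath("github.com/acme/app/vendor/github.com/golang/mock/gomock")
	// p == "github.com/golang/mock/gomock"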
|
||||
|
||||
// ErrorInterface represents the built-in error interface.
|
||||
var ErrorInterface = Interface{
|
||||
Name: "error",
|
||||
Methods: []*Method{
|
||||
{
|
||||
Name: "Error",
|
||||
Out: []*Parameter{
|
||||
{
|
||||
Name: "",
|
||||
Type: PredeclaredType("string"),
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
@ -0,0 +1,644 @@
// Copyright 2012 Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
// This file contains the model construction by parsing source files.
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/ast"
|
||||
"go/build"
|
||||
"go/importer"
|
||||
"go/parser"
|
||||
"go/token"
|
||||
"go/types"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"path"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/golang/mock/mockgen/model"
|
||||
)
|
||||
|
||||
var (
|
||||
imports = flag.String("imports", "", "(source mode) Comma-separated name=path pairs of explicit imports to use.")
|
||||
auxFiles = flag.String("aux_files", "", "(source mode) Comma-separated pkg=path pairs of auxiliary Go source files.")
|
||||
)
|
||||
|
||||
// sourceMode generates mocks via source file.
|
||||
func sourceMode(source string) (*model.Package, error) {
|
||||
srcDir, err := filepath.Abs(filepath.Dir(source))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed getting source directory: %v", err)
|
||||
}
|
||||
|
||||
packageImport, err := parsePackageImport(srcDir)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fs := token.NewFileSet()
|
||||
file, err := parser.ParseFile(fs, source, nil, 0)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed parsing source file %v: %v", source, err)
|
||||
}
|
||||
|
||||
p := &fileParser{
|
||||
fileSet: fs,
|
||||
imports: make(map[string]importedPackage),
|
||||
importedInterfaces: make(map[string]map[string]*ast.InterfaceType),
|
||||
auxInterfaces: make(map[string]map[string]*ast.InterfaceType),
|
||||
srcDir: srcDir,
|
||||
}
|
||||
|
||||
// Handle -imports.
|
||||
dotImports := make(map[string]bool)
|
||||
if *imports != "" {
|
||||
for _, kv := range strings.Split(*imports, ",") {
|
||||
eq := strings.Index(kv, "=")
|
||||
k, v := kv[:eq], kv[eq+1:]
|
||||
if k == "." {
|
||||
dotImports[v] = true
|
||||
} else {
|
||||
p.imports[k] = importedPkg{path: v}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Handle -aux_files.
|
||||
if err := p.parseAuxFiles(*auxFiles); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p.addAuxInterfacesFromFile(packageImport, file) // this file
|
||||
|
||||
pkg, err := p.parseFile(packageImport, file)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for pkgPath := range dotImports {
|
||||
pkg.DotImports = append(pkg.DotImports, pkgPath)
|
||||
}
|
||||
return pkg, nil
|
||||
}
|
||||
|
||||
type importedPackage interface {
|
||||
Path() string
|
||||
Parser() *fileParser
|
||||
}
|
||||
|
||||
type importedPkg struct {
|
||||
path string
|
||||
parser *fileParser
|
||||
}
|
||||
|
||||
func (i importedPkg) Path() string { return i.path }
|
||||
func (i importedPkg) Parser() *fileParser { return i.parser }
|
||||
|
||||
// duplicateImport is a bit of a misnomer. Currently the parser can't
|
||||
// handle cases of multi-file packages importing different packages
|
||||
// under the same name. Often these imports would not be problematic,
|
||||
// so this type lets us defer raising an error unless the package name
|
||||
// is actually used.
|
||||
type duplicateImport struct {
|
||||
name string
|
||||
duplicates []string
|
||||
}
|
||||
|
||||
func (d duplicateImport) Error() string {
|
||||
return fmt.Sprintf("%q is ambiguous because of duplicate imports: %v", d.name, d.duplicates)
|
||||
}
|
||||
|
||||
func (d duplicateImport) Path() string { log.Fatal(d.Error()); return "" }
|
||||
func (d duplicateImport) Parser() *fileParser { log.Fatal(d.Error()); return nil }
|
||||
|
||||
type fileParser struct {
|
||||
fileSet *token.FileSet
|
||||
imports map[string]importedPackage // package name => imported package
|
||||
importedInterfaces map[string]map[string]*ast.InterfaceType // package (or "") => name => interface
|
||||
|
||||
auxFiles []*ast.File
|
||||
auxInterfaces map[string]map[string]*ast.InterfaceType // package (or "") => name => interface
|
||||
|
||||
srcDir string
|
||||
}
|
||||
|
||||
func (p *fileParser) errorf(pos token.Pos, format string, args ...interface{}) error {
|
||||
ps := p.fileSet.Position(pos)
|
||||
format = "%s:%d:%d: " + format
|
||||
args = append([]interface{}{ps.Filename, ps.Line, ps.Column}, args...)
|
||||
return fmt.Errorf(format, args...)
|
||||
}
|
||||
|
||||
func (p *fileParser) parseAuxFiles(auxFiles string) error {
|
||||
auxFiles = strings.TrimSpace(auxFiles)
|
||||
if auxFiles == "" {
|
||||
return nil
|
||||
}
|
||||
for _, kv := range strings.Split(auxFiles, ",") {
|
||||
parts := strings.SplitN(kv, "=", 2)
|
||||
if len(parts) != 2 {
|
||||
return fmt.Errorf("bad aux file spec: %v", kv)
|
||||
}
|
||||
pkg, fpath := parts[0], parts[1]
|
||||
|
||||
file, err := parser.ParseFile(p.fileSet, fpath, nil, 0)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
p.auxFiles = append(p.auxFiles, file)
|
||||
p.addAuxInterfacesFromFile(pkg, file)
|
||||
}
|
||||
return nil
|
||||
}
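As a sketch with hypothetical file paths, an -aux_files value is a comma-separated list of pkg=path pairs:

	err := p.parseAuxFiles("db=internal/db/iface.go,cache=internal/cache/iface.go")
	// each listed file is parsed and its interfaces are recorded under the given package name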
|
||||
|
||||
func (p *fileParser) addAuxInterfacesFromFile(pkg string, file *ast.File) {
|
||||
if _, ok := p.auxInterfaces[pkg]; !ok {
|
||||
p.auxInterfaces[pkg] = make(map[string]*ast.InterfaceType)
|
||||
}
|
||||
for ni := range iterInterfaces(file) {
|
||||
p.auxInterfaces[pkg][ni.name.Name] = ni.it
|
||||
}
|
||||
}
|
||||
|
||||
// parseFile loads the file's imports and the auxiliary files' imports into the
// fileParser, parses all of the file's interfaces, and returns the package model.
|
||||
func (p *fileParser) parseFile(importPath string, file *ast.File) (*model.Package, error) {
|
||||
allImports, dotImports := importsOfFile(file)
|
||||
// Don't stomp imports provided by -imports. Those should take precedence.
|
||||
for pkg, pkgI := range allImports {
|
||||
if _, ok := p.imports[pkg]; !ok {
|
||||
p.imports[pkg] = pkgI
|
||||
}
|
||||
}
|
||||
// Add imports from auxiliary files, which might be needed for embedded interfaces.
|
||||
// Don't stomp any other imports.
|
||||
for _, f := range p.auxFiles {
|
||||
auxImports, _ := importsOfFile(f)
|
||||
for pkg, pkgI := range auxImports {
|
||||
if _, ok := p.imports[pkg]; !ok {
|
||||
p.imports[pkg] = pkgI
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var is []*model.Interface
|
||||
for ni := range iterInterfaces(file) {
|
||||
i, err := p.parseInterface(ni.name.String(), importPath, ni.it)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
is = append(is, i)
|
||||
}
|
||||
return &model.Package{
|
||||
Name: file.Name.String(),
|
||||
PkgPath: importPath,
|
||||
Interfaces: is,
|
||||
DotImports: dotImports,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// parsePackage loads the package specified by path, parses it, and returns
// a new fileParser with the parsed imports and interfaces.
|
||||
func (p *fileParser) parsePackage(path string) (*fileParser, error) {
|
||||
newP := &fileParser{
|
||||
fileSet: token.NewFileSet(),
|
||||
imports: make(map[string]importedPackage),
|
||||
importedInterfaces: make(map[string]map[string]*ast.InterfaceType),
|
||||
auxInterfaces: make(map[string]map[string]*ast.InterfaceType),
|
||||
srcDir: p.srcDir,
|
||||
}
|
||||
|
||||
var pkgs map[string]*ast.Package
|
||||
if imp, err := build.Import(path, newP.srcDir, build.FindOnly); err != nil {
|
||||
return nil, err
|
||||
} else if pkgs, err = parser.ParseDir(newP.fileSet, imp.Dir, nil, 0); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, pkg := range pkgs {
|
||||
file := ast.MergePackageFiles(pkg, ast.FilterFuncDuplicates|ast.FilterUnassociatedComments|ast.FilterImportDuplicates)
|
||||
if _, ok := newP.importedInterfaces[path]; !ok {
|
||||
newP.importedInterfaces[path] = make(map[string]*ast.InterfaceType)
|
||||
}
|
||||
for ni := range iterInterfaces(file) {
|
||||
newP.importedInterfaces[path][ni.name.Name] = ni.it
|
||||
}
|
||||
imports, _ := importsOfFile(file)
|
||||
for pkgName, pkgI := range imports {
|
||||
newP.imports[pkgName] = pkgI
|
||||
}
|
||||
}
|
||||
return newP, nil
|
||||
}
|
||||
|
||||
func (p *fileParser) parseInterface(name, pkg string, it *ast.InterfaceType) (*model.Interface, error) {
|
||||
iface := &model.Interface{Name: name}
|
||||
for _, field := range it.Methods.List {
|
||||
switch v := field.Type.(type) {
|
||||
case *ast.FuncType:
|
||||
if nn := len(field.Names); nn != 1 {
|
||||
return nil, fmt.Errorf("expected one name for interface %v, got %d", iface.Name, nn)
|
||||
}
|
||||
m := &model.Method{
|
||||
Name: field.Names[0].String(),
|
||||
}
|
||||
var err error
|
||||
m.In, m.Variadic, m.Out, err = p.parseFunc(pkg, v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
iface.AddMethod(m)
|
||||
case *ast.Ident:
|
||||
// Embedded interface in this package.
|
||||
embeddedIfaceType := p.auxInterfaces[pkg][v.String()]
|
||||
if embeddedIfaceType == nil {
|
||||
embeddedIfaceType = p.importedInterfaces[pkg][v.String()]
|
||||
}
|
||||
|
||||
var embeddedIface *model.Interface
|
||||
if embeddedIfaceType != nil {
|
||||
var err error
|
||||
embeddedIface, err = p.parseInterface(v.String(), pkg, embeddedIfaceType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
// This is the built-in error interface.
|
||||
if v.String() == model.ErrorInterface.Name {
|
||||
embeddedIface = &model.ErrorInterface
|
||||
} else {
|
||||
return nil, p.errorf(v.Pos(), "unknown embedded interface %s", v.String())
|
||||
}
|
||||
}
|
||||
// Copy the methods.
|
||||
for _, m := range embeddedIface.Methods {
|
||||
iface.AddMethod(m)
|
||||
}
|
||||
case *ast.SelectorExpr:
|
||||
// Embedded interface in another package.
|
||||
filePkg, sel := v.X.(*ast.Ident).String(), v.Sel.String()
|
||||
embeddedPkg, ok := p.imports[filePkg]
|
||||
if !ok {
|
||||
return nil, p.errorf(v.X.Pos(), "unknown package %s", filePkg)
|
||||
}
|
||||
|
||||
var embeddedIface *model.Interface
|
||||
var err error
|
||||
embeddedIfaceType := p.auxInterfaces[filePkg][sel]
|
||||
if embeddedIfaceType != nil {
|
||||
embeddedIface, err = p.parseInterface(sel, filePkg, embeddedIfaceType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
} else {
|
||||
path := embeddedPkg.Path()
|
||||
parser := embeddedPkg.Parser()
|
||||
if parser == nil {
|
||||
ip, err := p.parsePackage(path)
|
||||
if err != nil {
|
||||
return nil, p.errorf(v.Pos(), "could not parse package %s: %v", path, err)
|
||||
}
|
||||
parser = ip
|
||||
p.imports[filePkg] = importedPkg{
|
||||
path: embeddedPkg.Path(),
|
||||
parser: parser,
|
||||
}
|
||||
}
|
||||
if embeddedIfaceType = parser.importedInterfaces[path][sel]; embeddedIfaceType == nil {
|
||||
return nil, p.errorf(v.Pos(), "unknown embedded interface %s.%s", path, sel)
|
||||
}
|
||||
embeddedIface, err = parser.parseInterface(sel, path, embeddedIfaceType)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
// Copy the methods.
|
||||
// TODO: apply shadowing rules.
|
||||
for _, m := range embeddedIface.Methods {
|
||||
iface.AddMethod(m)
|
||||
}
|
||||
default:
|
||||
return nil, fmt.Errorf("don't know how to mock method of type %T", field.Type)
|
||||
}
|
||||
}
|
||||
return iface, nil
|
||||
}
|
||||
|
||||
func (p *fileParser) parseFunc(pkg string, f *ast.FuncType) (inParam []*model.Parameter, variadic *model.Parameter, outParam []*model.Parameter, err error) {
|
||||
if f.Params != nil {
|
||||
regParams := f.Params.List
|
||||
if isVariadic(f) {
|
||||
n := len(regParams)
|
||||
varParams := regParams[n-1:]
|
||||
regParams = regParams[:n-1]
|
||||
vp, err := p.parseFieldList(pkg, varParams)
|
||||
if err != nil {
|
||||
return nil, nil, nil, p.errorf(varParams[0].Pos(), "failed parsing variadic argument: %v", err)
|
||||
}
|
||||
variadic = vp[0]
|
||||
}
|
||||
inParam, err = p.parseFieldList(pkg, regParams)
|
||||
if err != nil {
|
||||
return nil, nil, nil, p.errorf(f.Pos(), "failed parsing arguments: %v", err)
|
||||
}
|
||||
}
|
||||
if f.Results != nil {
|
||||
outParam, err = p.parseFieldList(pkg, f.Results.List)
|
||||
if err != nil {
|
||||
return nil, nil, nil, p.errorf(f.Pos(), "failed parsing returns: %v", err)
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (p *fileParser) parseFieldList(pkg string, fields []*ast.Field) ([]*model.Parameter, error) {
|
||||
nf := 0
|
||||
for _, f := range fields {
|
||||
nn := len(f.Names)
|
||||
if nn == 0 {
|
||||
nn = 1 // anonymous parameter
|
||||
}
|
||||
nf += nn
|
||||
}
|
||||
if nf == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
ps := make([]*model.Parameter, nf)
|
||||
i := 0 // destination index
|
||||
for _, f := range fields {
|
||||
t, err := p.parseType(pkg, f.Type)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(f.Names) == 0 {
|
||||
// anonymous arg
|
||||
ps[i] = &model.Parameter{Type: t}
|
||||
i++
|
||||
continue
|
||||
}
|
||||
for _, name := range f.Names {
|
||||
ps[i] = &model.Parameter{Name: name.Name, Type: t}
|
||||
i++
|
||||
}
|
||||
}
|
||||
return ps, nil
|
||||
}
|
||||
|
||||
func (p *fileParser) parseType(pkg string, typ ast.Expr) (model.Type, error) {
|
||||
switch v := typ.(type) {
|
||||
case *ast.ArrayType:
|
||||
ln := -1
|
||||
if v.Len != nil {
|
||||
var value string
|
||||
switch val := v.Len.(type) {
|
||||
case (*ast.BasicLit):
|
||||
value = val.Value
|
||||
case (*ast.Ident):
|
||||
// when the length is a const defined locally
|
||||
value = val.Obj.Decl.(*ast.ValueSpec).Values[0].(*ast.BasicLit).Value
|
||||
case (*ast.SelectorExpr):
|
||||
// when the length is a const defined in an external package
|
||||
usedPkg, err := importer.Default().Import(fmt.Sprintf("%s", val.X))
|
||||
if err != nil {
|
||||
return nil, p.errorf(v.Len.Pos(), "unknown package in array length: %v", err)
|
||||
}
|
||||
ev, err := types.Eval(token.NewFileSet(), usedPkg, token.NoPos, val.Sel.Name)
|
||||
if err != nil {
|
||||
return nil, p.errorf(v.Len.Pos(), "unknown constant in array length: %v", err)
|
||||
}
|
||||
value = ev.Value.String()
|
||||
}
|
||||
|
||||
x, err := strconv.Atoi(value)
|
||||
if err != nil {
|
||||
return nil, p.errorf(v.Len.Pos(), "bad array size: %v", err)
|
||||
}
|
||||
ln = x
|
||||
}
|
||||
t, err := p.parseType(pkg, v.Elt)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.ArrayType{Len: ln, Type: t}, nil
|
||||
case *ast.ChanType:
|
||||
t, err := p.parseType(pkg, v.Value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var dir model.ChanDir
|
||||
if v.Dir == ast.SEND {
|
||||
dir = model.SendDir
|
||||
}
|
||||
if v.Dir == ast.RECV {
|
||||
dir = model.RecvDir
|
||||
}
|
||||
return &model.ChanType{Dir: dir, Type: t}, nil
|
||||
case *ast.Ellipsis:
|
||||
// assume we're parsing a variadic argument
|
||||
return p.parseType(pkg, v.Elt)
|
||||
case *ast.FuncType:
|
||||
in, variadic, out, err := p.parseFunc(pkg, v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.FuncType{In: in, Out: out, Variadic: variadic}, nil
|
||||
case *ast.Ident:
|
||||
if v.IsExported() {
|
||||
// `pkg` may be an aliased imported pkg
|
||||
// if so, patch the import w/ the fully qualified import
|
||||
maybeImportedPkg, ok := p.imports[pkg]
|
||||
if ok {
|
||||
pkg = maybeImportedPkg.Path()
|
||||
}
|
||||
// assume type in this package
|
||||
return &model.NamedType{Package: pkg, Type: v.Name}, nil
|
||||
}
|
||||
|
||||
// assume predeclared type
|
||||
return model.PredeclaredType(v.Name), nil
|
||||
case *ast.InterfaceType:
|
||||
if v.Methods != nil && len(v.Methods.List) > 0 {
|
||||
return nil, p.errorf(v.Pos(), "can't handle non-empty unnamed interface types")
|
||||
}
|
||||
return model.PredeclaredType("interface{}"), nil
|
||||
case *ast.MapType:
|
||||
key, err := p.parseType(pkg, v.Key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
value, err := p.parseType(pkg, v.Value)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.MapType{Key: key, Value: value}, nil
|
||||
case *ast.SelectorExpr:
|
||||
pkgName := v.X.(*ast.Ident).String()
|
||||
pkg, ok := p.imports[pkgName]
|
||||
if !ok {
|
||||
return nil, p.errorf(v.Pos(), "unknown package %q", pkgName)
|
||||
}
|
||||
return &model.NamedType{Package: pkg.Path(), Type: v.Sel.String()}, nil
|
||||
case *ast.StarExpr:
|
||||
t, err := p.parseType(pkg, v.X)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &model.PointerType{Type: t}, nil
|
||||
case *ast.StructType:
|
||||
if v.Fields != nil && len(v.Fields.List) > 0 {
|
||||
return nil, p.errorf(v.Pos(), "can't handle non-empty unnamed struct types")
|
||||
}
|
||||
return model.PredeclaredType("struct{}"), nil
|
||||
case *ast.ParenExpr:
|
||||
return p.parseType(pkg, v.X)
|
||||
}
|
||||
|
||||
return nil, fmt.Errorf("don't know how to parse type %T", typ)
|
||||
}
|
||||
|
||||
// importsOfFile returns a map of package name to import path
|
||||
// of the imports in file.
|
||||
func importsOfFile(file *ast.File) (normalImports map[string]importedPackage, dotImports []string) {
|
||||
var importPaths []string
|
||||
for _, is := range file.Imports {
|
||||
if is.Name != nil {
|
||||
continue
|
||||
}
|
||||
importPath := is.Path.Value[1 : len(is.Path.Value)-1] // remove quotes
|
||||
importPaths = append(importPaths, importPath)
|
||||
}
|
||||
packagesName := createPackageMap(importPaths)
|
||||
normalImports = make(map[string]importedPackage)
|
||||
dotImports = make([]string, 0)
|
||||
for _, is := range file.Imports {
|
||||
var pkgName string
|
||||
importPath := is.Path.Value[1 : len(is.Path.Value)-1] // remove quotes
|
||||
|
||||
if is.Name != nil {
|
||||
// Named imports are always certain.
|
||||
if is.Name.Name == "_" {
|
||||
continue
|
||||
}
|
||||
pkgName = is.Name.Name
|
||||
} else {
|
||||
pkg, ok := packagesName[importPath]
|
||||
if !ok {
|
||||
// Fallback to import path suffix. Note that this is uncertain.
|
||||
_, last := path.Split(importPath)
|
||||
// If the last path component has dots, the first dot-delimited
|
||||
// field is used as the name.
|
||||
pkgName = strings.SplitN(last, ".", 2)[0]
|
||||
} else {
|
||||
pkgName = pkg
|
||||
}
|
||||
}
|
||||
|
||||
if pkgName == "." {
|
||||
dotImports = append(dotImports, importPath)
|
||||
} else {
|
||||
if pkg, ok := normalImports[pkgName]; ok {
|
||||
switch p := pkg.(type) {
|
||||
case duplicateImport:
|
||||
normalImports[pkgName] = duplicateImport{
|
||||
name: p.name,
|
||||
duplicates: append([]string{importPath}, p.duplicates...),
|
||||
}
|
||||
case importedPkg:
|
||||
normalImports[pkgName] = duplicateImport{
|
||||
name: pkgName,
|
||||
duplicates: []string{p.path, importPath},
|
||||
}
|
||||
}
|
||||
} else {
|
||||
normalImports[pkgName] = importedPkg{path: importPath}
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type namedInterface struct {
|
||||
name *ast.Ident
|
||||
it *ast.InterfaceType
|
||||
}
|
||||
|
||||
// Create an iterator over all interfaces in file.
|
||||
func iterInterfaces(file *ast.File) <-chan namedInterface {
|
||||
ch := make(chan namedInterface)
|
||||
go func() {
|
||||
for _, decl := range file.Decls {
|
||||
gd, ok := decl.(*ast.GenDecl)
|
||||
if !ok || gd.Tok != token.TYPE {
|
||||
continue
|
||||
}
|
||||
for _, spec := range gd.Specs {
|
||||
ts, ok := spec.(*ast.TypeSpec)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
it, ok := ts.Type.(*ast.InterfaceType)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
|
||||
ch <- namedInterface{ts.Name, it}
|
||||
}
|
||||
}
|
||||
close(ch)
|
||||
}()
|
||||
return ch
|
||||
}
|
||||
|
||||
// isVariadic returns whether the function is variadic.
|
||||
func isVariadic(f *ast.FuncType) bool {
|
||||
nargs := len(f.Params.List)
|
||||
if nargs == 0 {
|
||||
return false
|
||||
}
|
||||
_, ok := f.Params.List[nargs-1].Type.(*ast.Ellipsis)
|
||||
return ok
|
||||
}
|
||||
|
||||
// packageNameOfDir get package import path via dir
|
||||
func packageNameOfDir(srcDir string) (string, error) {
|
||||
files, err := ioutil.ReadDir(srcDir)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
var goFilePath string
|
||||
for _, file := range files {
|
||||
if !file.IsDir() && strings.HasSuffix(file.Name(), ".go") {
|
||||
goFilePath = file.Name()
|
||||
break
|
||||
}
|
||||
}
|
||||
if goFilePath == "" {
|
||||
return "", fmt.Errorf("go source file not found %s", srcDir)
|
||||
}
|
||||
|
||||
packageImport, err := parsePackageImport(srcDir)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return packageImport, nil
|
||||
}
|
||||
|
||||
var errOutsideGoPath = errors.New("source directory is outside GOPATH")
|
|
@ -0,0 +1,256 @@
|
|||
// Copyright 2012 Google Inc.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package main
|
||||
|
||||
// This file contains the model construction by reflection.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/gob"
|
||||
"flag"
|
||||
"fmt"
|
||||
"go/build"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"runtime"
|
||||
"strings"
|
||||
"text/template"
|
||||
|
||||
"github.com/golang/mock/mockgen/model"
|
||||
)
|
||||
|
||||
var (
|
||||
progOnly = flag.Bool("prog_only", false, "(reflect mode) Only generate the reflection program; write it to stdout and exit.")
|
||||
execOnly = flag.String("exec_only", "", "(reflect mode) If set, execute this reflection program.")
|
||||
buildFlags = flag.String("build_flags", "", "(reflect mode) Additional flags for go build.")
|
||||
)
|
||||
|
||||
// reflectMode generates mocks via reflection on an interface.
|
||||
func reflectMode(importPath string, symbols []string) (*model.Package, error) {
|
||||
if *execOnly != "" {
|
||||
return run(*execOnly)
|
||||
}
|
||||
|
||||
program, err := writeProgram(importPath, symbols)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if *progOnly {
|
||||
if _, err := os.Stdout.Write(program); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
os.Exit(0)
|
||||
}
|
||||
|
||||
wd, _ := os.Getwd()
|
||||
|
||||
// Try to run the reflection program in the current working directory.
|
||||
if p, err := runInDir(program, wd); err == nil {
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// Try to run the program in the same directory as the input package.
|
||||
if p, err := build.Import(importPath, wd, build.FindOnly); err == nil {
|
||||
dir := p.Dir
|
||||
if p, err := runInDir(program, dir); err == nil {
|
||||
return p, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Try to run it in a standard temp directory.
|
||||
return runInDir(program, "")
|
||||
}
|
||||
|
||||
func writeProgram(importPath string, symbols []string) ([]byte, error) {
|
||||
var program bytes.Buffer
|
||||
data := reflectData{
|
||||
ImportPath: importPath,
|
||||
Symbols: symbols,
|
||||
}
|
||||
if err := reflectProgram.Execute(&program, &data); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return program.Bytes(), nil
|
||||
}
|
||||
|
||||
// run the given program and parse the output as a model.Package.
|
||||
func run(program string) (*model.Package, error) {
|
||||
f, err := ioutil.TempFile("", "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
filename := f.Name()
|
||||
defer os.Remove(filename)
|
||||
if err := f.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Run the program.
|
||||
cmd := exec.Command(program, "-output", filename)
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
if err := cmd.Run(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
f, err = os.Open(filename)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Process output.
|
||||
var pkg model.Package
|
||||
if err := gob.NewDecoder(f).Decode(&pkg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := f.Close(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &pkg, nil
|
||||
}
|
||||
|
||||
// runInDir writes the given program into the given dir, runs it there, and
|
||||
// parses the output as a model.Package.
|
||||
func runInDir(program []byte, dir string) (*model.Package, error) {
|
||||
// We use TempDir instead of TempFile so we can control the filename.
|
||||
tmpDir, err := ioutil.TempDir(dir, "gomock_reflect_")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer func() {
|
||||
if err := os.RemoveAll(tmpDir); err != nil {
|
||||
log.Printf("failed to remove temp directory: %s", err)
|
||||
}
|
||||
}()
|
||||
const progSource = "prog.go"
|
||||
var progBinary = "prog.bin"
|
||||
if runtime.GOOS == "windows" {
|
||||
// Windows won't execute a program unless it has a ".exe" suffix.
|
||||
progBinary += ".exe"
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(filepath.Join(tmpDir, progSource), program, 0600); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cmdArgs := []string{}
|
||||
cmdArgs = append(cmdArgs, "build")
|
||||
if *buildFlags != "" {
|
||||
cmdArgs = append(cmdArgs, strings.Split(*buildFlags, " ")...)
|
||||
}
|
||||
cmdArgs = append(cmdArgs, "-o", progBinary, progSource)
|
||||
|
||||
// Build the program.
|
||||
buf := bytes.NewBuffer(nil)
|
||||
cmd := exec.Command("go", cmdArgs...)
|
||||
cmd.Dir = tmpDir
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = io.MultiWriter(os.Stderr, buf)
|
||||
if err := cmd.Run(); err != nil {
|
||||
sErr := buf.String()
|
||||
if strings.Contains(sErr, `cannot find package "."`) &&
|
||||
strings.Contains(sErr, "github.com/golang/mock/mockgen/model") {
|
||||
fmt.Fprint(os.Stderr, "Please reference the steps in the README to fix this error:\n\thttps://github.com/golang/mock#reflect-vendoring-error.")
|
||||
return nil, err
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return run(filepath.Join(tmpDir, progBinary))
|
||||
}
|
||||
|
||||
type reflectData struct {
|
||||
ImportPath string
|
||||
Symbols []string
|
||||
}
|
||||
|
||||
// This program reflects on an interface value, and prints the
|
||||
// gob encoding of a model.Package to standard output.
|
||||
// JSON doesn't work because of the model.Type interface.
|
||||
var reflectProgram = template.Must(template.New("program").Parse(`
|
||||
package main
|
||||
|
||||
import (
|
||||
"encoding/gob"
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"path"
|
||||
"reflect"
|
||||
|
||||
"github.com/golang/mock/mockgen/model"
|
||||
|
||||
pkg_ {{printf "%q" .ImportPath}}
|
||||
)
|
||||
|
||||
var output = flag.String("output", "", "The output file name, or empty to use stdout.")
|
||||
|
||||
func main() {
|
||||
flag.Parse()
|
||||
|
||||
its := []struct{
|
||||
sym string
|
||||
typ reflect.Type
|
||||
}{
|
||||
{{range .Symbols}}
|
||||
{ {{printf "%q" .}}, reflect.TypeOf((*pkg_.{{.}})(nil)).Elem()},
|
||||
{{end}}
|
||||
}
|
||||
pkg := &model.Package{
|
||||
// NOTE: This behaves contrary to documented behaviour if the
|
||||
// package name is not the final component of the import path.
|
||||
// The reflect package doesn't expose the package name, though.
|
||||
Name: path.Base({{printf "%q" .ImportPath}}),
|
||||
}
|
||||
|
||||
for _, it := range its {
|
||||
intf, err := model.InterfaceFromInterfaceType(it.typ)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Reflection: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
intf.Name = it.sym
|
||||
pkg.Interfaces = append(pkg.Interfaces, intf)
|
||||
}
|
||||
|
||||
outfile := os.Stdout
|
||||
if len(*output) != 0 {
|
||||
var err error
|
||||
outfile, err = os.Create(*output)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "failed to open output file %q", *output)
|
||||
}
|
||||
defer func() {
|
||||
if err := outfile.Close(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "failed to close output file %q", *output)
|
||||
os.Exit(1)
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
if err := gob.NewEncoder(outfile).Encode(pkg); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "gob encode: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
`))
|
|
@ -0,0 +1,26 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build !go1.12

package main

import (
	"log"
)

func printModuleVersion() {
	log.Printf("No version information is available for Mockgen compiled with " +
		"version 1.11")
}
@ -0,0 +1,35 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//

// +build go1.12

package main

import (
	"fmt"
	"log"
	"runtime/debug"
)

func printModuleVersion() {
	if bi, exists := debug.ReadBuildInfo(); exists {
		fmt.Println(bi.Main.Version)
	} else {
		log.Printf("No version information found. Make sure to use " +
			"GO111MODULE=on when running 'go get' in order to use specific " +
			"version of the binary.")
	}

}
@ -0,0 +1,7 @@
# This is the official list of pprof authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
Google Inc.
@ -0,0 +1,16 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>
Raul Silvera <rsilvera@google.com>
Tipp Moseley <tipp@google.com>
Hyoun Kyu Cho <netforce@google.com>
Martin Spier <spiermar@gmail.com>
Taco de Wolff <tacodewolff@gmail.com>
Andrew Hunter <andrewhhunter@gmail.com>
@ -0,0 +1,202 @@
|
|||
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "[]"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
|
@ -0,0 +1,567 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package profile
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sort"
|
||||
)
|
||||
|
||||
func (p *Profile) decoder() []decoder {
|
||||
return profileDecoder
|
||||
}
|
||||
|
||||
// preEncode populates the unexported fields to be used by encode
|
||||
// (with suffix X) from the corresponding exported fields. The
|
||||
// exported fields are cleared up to facilitate testing.
|
||||
func (p *Profile) preEncode() {
|
||||
strings := make(map[string]int)
|
||||
addString(strings, "")
|
||||
|
||||
for _, st := range p.SampleType {
|
||||
st.typeX = addString(strings, st.Type)
|
||||
st.unitX = addString(strings, st.Unit)
|
||||
}
|
||||
|
||||
for _, s := range p.Sample {
|
||||
s.labelX = nil
|
||||
var keys []string
|
||||
for k := range s.Label {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
for _, k := range keys {
|
||||
vs := s.Label[k]
|
||||
for _, v := range vs {
|
||||
s.labelX = append(s.labelX,
|
||||
label{
|
||||
keyX: addString(strings, k),
|
||||
strX: addString(strings, v),
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
var numKeys []string
|
||||
for k := range s.NumLabel {
|
||||
numKeys = append(numKeys, k)
|
||||
}
|
||||
sort.Strings(numKeys)
|
||||
for _, k := range numKeys {
|
||||
keyX := addString(strings, k)
|
||||
vs := s.NumLabel[k]
|
||||
units := s.NumUnit[k]
|
||||
for i, v := range vs {
|
||||
var unitX int64
|
||||
if len(units) != 0 {
|
||||
unitX = addString(strings, units[i])
|
||||
}
|
||||
s.labelX = append(s.labelX,
|
||||
label{
|
||||
keyX: keyX,
|
||||
numX: v,
|
||||
unitX: unitX,
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
s.locationIDX = make([]uint64, len(s.Location))
|
||||
for i, loc := range s.Location {
|
||||
s.locationIDX[i] = loc.ID
|
||||
}
|
||||
}
|
||||
|
||||
for _, m := range p.Mapping {
|
||||
m.fileX = addString(strings, m.File)
|
||||
m.buildIDX = addString(strings, m.BuildID)
|
||||
}
|
||||
|
||||
for _, l := range p.Location {
|
||||
for i, ln := range l.Line {
|
||||
if ln.Function != nil {
|
||||
l.Line[i].functionIDX = ln.Function.ID
|
||||
} else {
|
||||
l.Line[i].functionIDX = 0
|
||||
}
|
||||
}
|
||||
if l.Mapping != nil {
|
||||
l.mappingIDX = l.Mapping.ID
|
||||
} else {
|
||||
l.mappingIDX = 0
|
||||
}
|
||||
}
|
||||
for _, f := range p.Function {
|
||||
f.nameX = addString(strings, f.Name)
|
||||
f.systemNameX = addString(strings, f.SystemName)
|
||||
f.filenameX = addString(strings, f.Filename)
|
||||
}
|
||||
|
||||
p.dropFramesX = addString(strings, p.DropFrames)
|
||||
p.keepFramesX = addString(strings, p.KeepFrames)
|
||||
|
||||
if pt := p.PeriodType; pt != nil {
|
||||
pt.typeX = addString(strings, pt.Type)
|
||||
pt.unitX = addString(strings, pt.Unit)
|
||||
}
|
||||
|
||||
p.commentX = nil
|
||||
for _, c := range p.Comments {
|
||||
p.commentX = append(p.commentX, addString(strings, c))
|
||||
}
|
||||
|
||||
p.defaultSampleTypeX = addString(strings, p.DefaultSampleType)
|
||||
|
||||
p.stringTable = make([]string, len(strings))
|
||||
for s, i := range strings {
|
||||
p.stringTable[i] = s
|
||||
}
|
||||
}
|
||||
|
||||
func (p *Profile) encode(b *buffer) {
|
||||
for _, x := range p.SampleType {
|
||||
encodeMessage(b, 1, x)
|
||||
}
|
||||
for _, x := range p.Sample {
|
||||
encodeMessage(b, 2, x)
|
||||
}
|
||||
for _, x := range p.Mapping {
|
||||
encodeMessage(b, 3, x)
|
||||
}
|
||||
for _, x := range p.Location {
|
||||
encodeMessage(b, 4, x)
|
||||
}
|
||||
for _, x := range p.Function {
|
||||
encodeMessage(b, 5, x)
|
||||
}
|
||||
encodeStrings(b, 6, p.stringTable)
|
||||
encodeInt64Opt(b, 7, p.dropFramesX)
|
||||
encodeInt64Opt(b, 8, p.keepFramesX)
|
||||
encodeInt64Opt(b, 9, p.TimeNanos)
|
||||
encodeInt64Opt(b, 10, p.DurationNanos)
|
||||
if pt := p.PeriodType; pt != nil && (pt.typeX != 0 || pt.unitX != 0) {
|
||||
encodeMessage(b, 11, p.PeriodType)
|
||||
}
|
||||
encodeInt64Opt(b, 12, p.Period)
|
||||
encodeInt64s(b, 13, p.commentX)
|
||||
encodeInt64(b, 14, p.defaultSampleTypeX)
|
||||
}
|
||||
|
||||
var profileDecoder = []decoder{
|
||||
nil, // 0
|
||||
// repeated ValueType sample_type = 1
|
||||
func(b *buffer, m message) error {
|
||||
x := new(ValueType)
|
||||
pp := m.(*Profile)
|
||||
pp.SampleType = append(pp.SampleType, x)
|
||||
return decodeMessage(b, x)
|
||||
},
|
||||
// repeated Sample sample = 2
|
||||
func(b *buffer, m message) error {
|
||||
x := new(Sample)
|
||||
pp := m.(*Profile)
|
||||
pp.Sample = append(pp.Sample, x)
|
||||
return decodeMessage(b, x)
|
||||
},
|
||||
// repeated Mapping mapping = 3
|
||||
func(b *buffer, m message) error {
|
||||
x := new(Mapping)
|
||||
pp := m.(*Profile)
|
||||
pp.Mapping = append(pp.Mapping, x)
|
||||
return decodeMessage(b, x)
|
||||
},
|
||||
// repeated Location location = 4
|
||||
func(b *buffer, m message) error {
|
||||
x := new(Location)
|
||||
x.Line = make([]Line, 0, 8) // Pre-allocate Line buffer
|
||||
pp := m.(*Profile)
|
||||
pp.Location = append(pp.Location, x)
|
||||
err := decodeMessage(b, x)
|
||||
var tmp []Line
|
||||
x.Line = append(tmp, x.Line...) // Shrink to allocated size
|
||||
return err
|
||||
},
|
||||
// repeated Function function = 5
|
||||
func(b *buffer, m message) error {
|
||||
x := new(Function)
|
||||
pp := m.(*Profile)
|
||||
pp.Function = append(pp.Function, x)
|
||||
return decodeMessage(b, x)
|
||||
},
|
||||
// repeated string string_table = 6
|
||||
func(b *buffer, m message) error {
|
||||
err := decodeStrings(b, &m.(*Profile).stringTable)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if m.(*Profile).stringTable[0] != "" {
|
||||
return errors.New("string_table[0] must be ''")
|
||||
}
|
||||
return nil
|
||||
},
|
||||
// int64 drop_frames = 7
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).dropFramesX) },
|
||||
// int64 keep_frames = 8
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).keepFramesX) },
|
||||
// int64 time_nanos = 9
|
||||
func(b *buffer, m message) error {
|
||||
if m.(*Profile).TimeNanos != 0 {
|
||||
return errConcatProfile
|
||||
}
|
||||
return decodeInt64(b, &m.(*Profile).TimeNanos)
|
||||
},
|
||||
// int64 duration_nanos = 10
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).DurationNanos) },
|
||||
// ValueType period_type = 11
|
||||
func(b *buffer, m message) error {
|
||||
x := new(ValueType)
|
||||
pp := m.(*Profile)
|
||||
pp.PeriodType = x
|
||||
return decodeMessage(b, x)
|
||||
},
|
||||
// int64 period = 12
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).Period) },
|
||||
// repeated int64 comment = 13
|
||||
func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Profile).commentX) },
|
||||
// int64 defaultSampleType = 14
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Profile).defaultSampleTypeX) },
|
||||
}
|
||||
|
||||
// postDecode takes the unexported fields populated by decode (with
|
||||
// suffix X) and populates the corresponding exported fields.
|
||||
// The unexported fields are cleared up to facilitate testing.
|
||||
func (p *Profile) postDecode() error {
|
||||
var err error
|
||||
mappings := make(map[uint64]*Mapping, len(p.Mapping))
|
||||
mappingIds := make([]*Mapping, len(p.Mapping)+1)
|
||||
for _, m := range p.Mapping {
|
||||
m.File, err = getString(p.stringTable, &m.fileX, err)
|
||||
m.BuildID, err = getString(p.stringTable, &m.buildIDX, err)
|
||||
if m.ID < uint64(len(mappingIds)) {
|
||||
mappingIds[m.ID] = m
|
||||
} else {
|
||||
mappings[m.ID] = m
|
||||
}
|
||||
}
|
||||
|
||||
functions := make(map[uint64]*Function, len(p.Function))
|
||||
functionIds := make([]*Function, len(p.Function)+1)
|
||||
for _, f := range p.Function {
|
||||
f.Name, err = getString(p.stringTable, &f.nameX, err)
|
||||
f.SystemName, err = getString(p.stringTable, &f.systemNameX, err)
|
||||
f.Filename, err = getString(p.stringTable, &f.filenameX, err)
|
||||
if f.ID < uint64(len(functionIds)) {
|
||||
functionIds[f.ID] = f
|
||||
} else {
|
||||
functions[f.ID] = f
|
||||
}
|
||||
}
|
||||
|
||||
locations := make(map[uint64]*Location, len(p.Location))
|
||||
locationIds := make([]*Location, len(p.Location)+1)
|
||||
for _, l := range p.Location {
|
||||
if id := l.mappingIDX; id < uint64(len(mappingIds)) {
|
||||
l.Mapping = mappingIds[id]
|
||||
} else {
|
||||
l.Mapping = mappings[id]
|
||||
}
|
||||
l.mappingIDX = 0
|
||||
for i, ln := range l.Line {
|
||||
if id := ln.functionIDX; id != 0 {
|
||||
l.Line[i].functionIDX = 0
|
||||
if id < uint64(len(functionIds)) {
|
||||
l.Line[i].Function = functionIds[id]
|
||||
} else {
|
||||
l.Line[i].Function = functions[id]
|
||||
}
|
||||
}
|
||||
}
|
||||
if l.ID < uint64(len(locationIds)) {
|
||||
locationIds[l.ID] = l
|
||||
} else {
|
||||
locations[l.ID] = l
|
||||
}
|
||||
}
|
||||
|
||||
for _, st := range p.SampleType {
|
||||
st.Type, err = getString(p.stringTable, &st.typeX, err)
|
||||
st.Unit, err = getString(p.stringTable, &st.unitX, err)
|
||||
}
|
||||
|
||||
for _, s := range p.Sample {
|
||||
labels := make(map[string][]string, len(s.labelX))
|
||||
numLabels := make(map[string][]int64, len(s.labelX))
|
||||
numUnits := make(map[string][]string, len(s.labelX))
|
||||
for _, l := range s.labelX {
|
||||
var key, value string
|
||||
key, err = getString(p.stringTable, &l.keyX, err)
|
||||
if l.strX != 0 {
|
||||
value, err = getString(p.stringTable, &l.strX, err)
|
||||
labels[key] = append(labels[key], value)
|
||||
} else if l.numX != 0 || l.unitX != 0 {
|
||||
numValues := numLabels[key]
|
||||
units := numUnits[key]
|
||||
if l.unitX != 0 {
|
||||
var unit string
|
||||
unit, err = getString(p.stringTable, &l.unitX, err)
|
||||
units = padStringArray(units, len(numValues))
|
||||
numUnits[key] = append(units, unit)
|
||||
}
|
||||
numLabels[key] = append(numLabels[key], l.numX)
|
||||
}
|
||||
}
|
||||
if len(labels) > 0 {
|
||||
s.Label = labels
|
||||
}
|
||||
if len(numLabels) > 0 {
|
||||
s.NumLabel = numLabels
|
||||
for key, units := range numUnits {
|
||||
if len(units) > 0 {
|
||||
numUnits[key] = padStringArray(units, len(numLabels[key]))
|
||||
}
|
||||
}
|
||||
s.NumUnit = numUnits
|
||||
}
|
||||
s.Location = make([]*Location, len(s.locationIDX))
|
||||
for i, lid := range s.locationIDX {
|
||||
if lid < uint64(len(locationIds)) {
|
||||
s.Location[i] = locationIds[lid]
|
||||
} else {
|
||||
s.Location[i] = locations[lid]
|
||||
}
|
||||
}
|
||||
s.locationIDX = nil
|
||||
}
|
||||
|
||||
p.DropFrames, err = getString(p.stringTable, &p.dropFramesX, err)
|
||||
p.KeepFrames, err = getString(p.stringTable, &p.keepFramesX, err)
|
||||
|
||||
if pt := p.PeriodType; pt == nil {
|
||||
p.PeriodType = &ValueType{}
|
||||
}
|
||||
|
||||
if pt := p.PeriodType; pt != nil {
|
||||
pt.Type, err = getString(p.stringTable, &pt.typeX, err)
|
||||
pt.Unit, err = getString(p.stringTable, &pt.unitX, err)
|
||||
}
|
||||
|
||||
for _, i := range p.commentX {
|
||||
var c string
|
||||
c, err = getString(p.stringTable, &i, err)
|
||||
p.Comments = append(p.Comments, c)
|
||||
}
|
||||
|
||||
p.commentX = nil
|
||||
p.DefaultSampleType, err = getString(p.stringTable, &p.defaultSampleTypeX, err)
|
||||
p.stringTable = nil
|
||||
return err
|
||||
}
|
||||
|
||||
// padStringArray pads arr with enough empty strings to make arr
|
||||
// length l when arr's length is less than l.
|
||||
func padStringArray(arr []string, l int) []string {
|
||||
if l <= len(arr) {
|
||||
return arr
|
||||
}
|
||||
return append(arr, make([]string, l-len(arr))...)
|
||||
}
|
||||
|
||||
func (p *ValueType) decoder() []decoder {
|
||||
return valueTypeDecoder
|
||||
}
|
||||
|
||||
func (p *ValueType) encode(b *buffer) {
|
||||
encodeInt64Opt(b, 1, p.typeX)
|
||||
encodeInt64Opt(b, 2, p.unitX)
|
||||
}
|
||||
|
||||
var valueTypeDecoder = []decoder{
|
||||
nil, // 0
|
||||
// optional int64 type = 1
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).typeX) },
|
||||
// optional int64 unit = 2
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*ValueType).unitX) },
|
||||
}
|
||||
|
||||
func (p *Sample) decoder() []decoder {
|
||||
return sampleDecoder
|
||||
}
|
||||
|
||||
func (p *Sample) encode(b *buffer) {
|
||||
encodeUint64s(b, 1, p.locationIDX)
|
||||
encodeInt64s(b, 2, p.Value)
|
||||
for _, x := range p.labelX {
|
||||
encodeMessage(b, 3, x)
|
||||
}
|
||||
}
|
||||
|
||||
var sampleDecoder = []decoder{
|
||||
nil, // 0
|
||||
// repeated uint64 location = 1
|
||||
func(b *buffer, m message) error { return decodeUint64s(b, &m.(*Sample).locationIDX) },
|
||||
// repeated int64 value = 2
|
||||
func(b *buffer, m message) error { return decodeInt64s(b, &m.(*Sample).Value) },
|
||||
// repeated Label label = 3
|
||||
func(b *buffer, m message) error {
|
||||
s := m.(*Sample)
|
||||
n := len(s.labelX)
|
||||
s.labelX = append(s.labelX, label{})
|
||||
return decodeMessage(b, &s.labelX[n])
|
||||
},
|
||||
}
|
||||
|
||||
func (p label) decoder() []decoder {
|
||||
return labelDecoder
|
||||
}
|
||||
|
||||
func (p label) encode(b *buffer) {
|
||||
encodeInt64Opt(b, 1, p.keyX)
|
||||
encodeInt64Opt(b, 2, p.strX)
|
||||
encodeInt64Opt(b, 3, p.numX)
|
||||
encodeInt64Opt(b, 4, p.unitX)
|
||||
}
|
||||
|
||||
var labelDecoder = []decoder{
|
||||
nil, // 0
|
||||
// optional int64 key = 1
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).keyX) },
|
||||
// optional int64 str = 2
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).strX) },
|
||||
// optional int64 num = 3
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).numX) },
|
||||
// optional int64 num = 4
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*label).unitX) },
|
||||
}
|
||||
|
||||
func (p *Mapping) decoder() []decoder {
|
||||
return mappingDecoder
|
||||
}
|
||||
|
||||
func (p *Mapping) encode(b *buffer) {
|
||||
encodeUint64Opt(b, 1, p.ID)
|
||||
encodeUint64Opt(b, 2, p.Start)
|
||||
encodeUint64Opt(b, 3, p.Limit)
|
||||
encodeUint64Opt(b, 4, p.Offset)
|
||||
encodeInt64Opt(b, 5, p.fileX)
|
||||
encodeInt64Opt(b, 6, p.buildIDX)
|
||||
encodeBoolOpt(b, 7, p.HasFunctions)
|
||||
encodeBoolOpt(b, 8, p.HasFilenames)
|
||||
encodeBoolOpt(b, 9, p.HasLineNumbers)
|
||||
encodeBoolOpt(b, 10, p.HasInlineFrames)
|
||||
}
|
||||
|
||||
var mappingDecoder = []decoder{
|
||||
nil, // 0
|
||||
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).ID) }, // optional uint64 id = 1
|
||||
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Start) }, // optional uint64 memory_offset = 2
|
||||
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Limit) }, // optional uint64 memory_limit = 3
|
||||
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Mapping).Offset) }, // optional uint64 file_offset = 4
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).fileX) }, // optional int64 filename = 5
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Mapping).buildIDX) }, // optional int64 build_id = 6
|
||||
func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFunctions) }, // optional bool has_functions = 7
|
||||
func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasFilenames) }, // optional bool has_filenames = 8
|
||||
func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasLineNumbers) }, // optional bool has_line_numbers = 9
|
||||
func(b *buffer, m message) error { return decodeBool(b, &m.(*Mapping).HasInlineFrames) }, // optional bool has_inline_frames = 10
|
||||
}
|
||||
|
||||
func (p *Location) decoder() []decoder {
|
||||
return locationDecoder
|
||||
}
|
||||
|
||||
func (p *Location) encode(b *buffer) {
|
||||
encodeUint64Opt(b, 1, p.ID)
|
||||
encodeUint64Opt(b, 2, p.mappingIDX)
|
||||
encodeUint64Opt(b, 3, p.Address)
|
||||
for i := range p.Line {
|
||||
encodeMessage(b, 4, &p.Line[i])
|
||||
}
|
||||
encodeBoolOpt(b, 5, p.IsFolded)
|
||||
}
|
||||
|
||||
var locationDecoder = []decoder{
|
||||
nil, // 0
|
||||
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).ID) }, // optional uint64 id = 1;
|
||||
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).mappingIDX) }, // optional uint64 mapping_id = 2;
|
||||
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Location).Address) }, // optional uint64 address = 3;
|
||||
func(b *buffer, m message) error { // repeated Line line = 4
|
||||
pp := m.(*Location)
|
||||
n := len(pp.Line)
|
||||
pp.Line = append(pp.Line, Line{})
|
||||
return decodeMessage(b, &pp.Line[n])
|
||||
},
|
||||
func(b *buffer, m message) error { return decodeBool(b, &m.(*Location).IsFolded) }, // optional bool is_folded = 5;
|
||||
}
|
||||
|
||||
func (p *Line) decoder() []decoder {
|
||||
return lineDecoder
|
||||
}
|
||||
|
||||
func (p *Line) encode(b *buffer) {
|
||||
encodeUint64Opt(b, 1, p.functionIDX)
|
||||
encodeInt64Opt(b, 2, p.Line)
|
||||
}
|
||||
|
||||
var lineDecoder = []decoder{
|
||||
nil, // 0
|
||||
// optional uint64 function_id = 1
|
||||
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Line).functionIDX) },
|
||||
// optional int64 line = 2
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Line).Line) },
|
||||
}
|
||||
|
||||
func (p *Function) decoder() []decoder {
|
||||
return functionDecoder
|
||||
}
|
||||
|
||||
func (p *Function) encode(b *buffer) {
|
||||
encodeUint64Opt(b, 1, p.ID)
|
||||
encodeInt64Opt(b, 2, p.nameX)
|
||||
encodeInt64Opt(b, 3, p.systemNameX)
|
||||
encodeInt64Opt(b, 4, p.filenameX)
|
||||
encodeInt64Opt(b, 5, p.StartLine)
|
||||
}
|
||||
|
||||
var functionDecoder = []decoder{
|
||||
nil, // 0
|
||||
// optional uint64 id = 1
|
||||
func(b *buffer, m message) error { return decodeUint64(b, &m.(*Function).ID) },
|
||||
// optional int64 function_name = 2
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).nameX) },
|
||||
// optional int64 function_system_name = 3
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).systemNameX) },
|
||||
// repeated int64 filename = 4
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).filenameX) },
|
||||
// optional int64 start_line = 5
|
||||
func(b *buffer, m message) error { return decodeInt64(b, &m.(*Function).StartLine) },
|
||||
}
|
||||
|
||||
func addString(strings map[string]int, s string) int64 {
|
||||
i, ok := strings[s]
|
||||
if !ok {
|
||||
i = len(strings)
|
||||
strings[s] = i
|
||||
}
|
||||
return int64(i)
|
||||
}
|
||||
|
||||
func getString(strings []string, strng *int64, err error) (string, error) {
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
s := int(*strng)
|
||||
if s < 0 || s >= len(strings) {
|
||||
return "", errMalformed
|
||||
}
|
||||
*strng = 0
|
||||
return strings[s], nil
|
||||
}
|
|
@ -0,0 +1,270 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package profile
|
||||
|
||||
// Implements methods to filter samples from profiles.
|
||||
|
||||
import "regexp"
|
||||
|
||||
// FilterSamplesByName filters the samples in a profile and only keeps
|
||||
// samples where at least one frame matches focus but none match ignore.
|
||||
// Returns true is the corresponding regexp matched at least one sample.
|
||||
func (p *Profile) FilterSamplesByName(focus, ignore, hide, show *regexp.Regexp) (fm, im, hm, hnm bool) {
|
||||
focusOrIgnore := make(map[uint64]bool)
|
||||
hidden := make(map[uint64]bool)
|
||||
for _, l := range p.Location {
|
||||
if ignore != nil && l.matchesName(ignore) {
|
||||
im = true
|
||||
focusOrIgnore[l.ID] = false
|
||||
} else if focus == nil || l.matchesName(focus) {
|
||||
fm = true
|
||||
focusOrIgnore[l.ID] = true
|
||||
}
|
||||
|
||||
if hide != nil && l.matchesName(hide) {
|
||||
hm = true
|
||||
l.Line = l.unmatchedLines(hide)
|
||||
if len(l.Line) == 0 {
|
||||
hidden[l.ID] = true
|
||||
}
|
||||
}
|
||||
if show != nil {
|
||||
l.Line = l.matchedLines(show)
|
||||
if len(l.Line) == 0 {
|
||||
hidden[l.ID] = true
|
||||
} else {
|
||||
hnm = true
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
s := make([]*Sample, 0, len(p.Sample))
|
||||
for _, sample := range p.Sample {
|
||||
if focusedAndNotIgnored(sample.Location, focusOrIgnore) {
|
||||
if len(hidden) > 0 {
|
||||
var locs []*Location
|
||||
for _, loc := range sample.Location {
|
||||
if !hidden[loc.ID] {
|
||||
locs = append(locs, loc)
|
||||
}
|
||||
}
|
||||
if len(locs) == 0 {
|
||||
// Remove sample with no locations (by not adding it to s).
|
||||
continue
|
||||
}
|
||||
sample.Location = locs
|
||||
}
|
||||
s = append(s, sample)
|
||||
}
|
||||
}
|
||||
p.Sample = s
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// ShowFrom drops all stack frames above the highest matching frame and returns
|
||||
// whether a match was found. If showFrom is nil it returns false and does not
|
||||
// modify the profile.
|
||||
//
|
||||
// Example: consider a sample with frames [A, B, C, B], where A is the root.
|
||||
// ShowFrom(nil) returns false and has frames [A, B, C, B].
|
||||
// ShowFrom(A) returns true and has frames [A, B, C, B].
|
||||
// ShowFrom(B) returns true and has frames [B, C, B].
|
||||
// ShowFrom(C) returns true and has frames [C, B].
|
||||
// ShowFrom(D) returns false and drops the sample because no frames remain.
|
||||
func (p *Profile) ShowFrom(showFrom *regexp.Regexp) (matched bool) {
|
||||
if showFrom == nil {
|
||||
return false
|
||||
}
|
||||
// showFromLocs stores location IDs that matched ShowFrom.
|
||||
showFromLocs := make(map[uint64]bool)
|
||||
// Apply to locations.
|
||||
for _, loc := range p.Location {
|
||||
if filterShowFromLocation(loc, showFrom) {
|
||||
showFromLocs[loc.ID] = true
|
||||
matched = true
|
||||
}
|
||||
}
|
||||
// For all samples, strip locations after the highest matching one.
|
||||
s := make([]*Sample, 0, len(p.Sample))
|
||||
for _, sample := range p.Sample {
|
||||
for i := len(sample.Location) - 1; i >= 0; i-- {
|
||||
if showFromLocs[sample.Location[i].ID] {
|
||||
sample.Location = sample.Location[:i+1]
|
||||
s = append(s, sample)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
p.Sample = s
|
||||
return matched
|
||||
}
|
||||
|
||||
// filterShowFromLocation tests a showFrom regex against a location, removes
|
||||
// lines after the last match and returns whether a match was found. If the
|
||||
// mapping is matched, then all lines are kept.
|
||||
func filterShowFromLocation(loc *Location, showFrom *regexp.Regexp) bool {
|
||||
if m := loc.Mapping; m != nil && showFrom.MatchString(m.File) {
|
||||
return true
|
||||
}
|
||||
if i := loc.lastMatchedLineIndex(showFrom); i >= 0 {
|
||||
loc.Line = loc.Line[:i+1]
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// lastMatchedLineIndex returns the index of the last line that matches a regex,
|
||||
// or -1 if no match is found.
|
||||
func (loc *Location) lastMatchedLineIndex(re *regexp.Regexp) int {
|
||||
for i := len(loc.Line) - 1; i >= 0; i-- {
|
||||
if fn := loc.Line[i].Function; fn != nil {
|
||||
if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
|
||||
return i
|
||||
}
|
||||
}
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// FilterTagsByName filters the tags in a profile and only keeps
|
||||
// tags that match show and not hide.
|
||||
func (p *Profile) FilterTagsByName(show, hide *regexp.Regexp) (sm, hm bool) {
|
||||
matchRemove := func(name string) bool {
|
||||
matchShow := show == nil || show.MatchString(name)
|
||||
matchHide := hide != nil && hide.MatchString(name)
|
||||
|
||||
if matchShow {
|
||||
sm = true
|
||||
}
|
||||
if matchHide {
|
||||
hm = true
|
||||
}
|
||||
return !matchShow || matchHide
|
||||
}
|
||||
for _, s := range p.Sample {
|
||||
for lab := range s.Label {
|
||||
if matchRemove(lab) {
|
||||
delete(s.Label, lab)
|
||||
}
|
||||
}
|
||||
for lab := range s.NumLabel {
|
||||
if matchRemove(lab) {
|
||||
delete(s.NumLabel, lab)
|
||||
}
|
||||
}
|
||||
}
|
||||
return
|
||||
}
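// A small sketch, assuming a parsed *Profile in p; the label key pattern is
// hypothetical:
//
//	sm, hm := p.FilterTagsByName(regexp.MustCompile(`^bytes$`), nil)
//	// Label and NumLabel entries whose key is not "bytes" are deleted from
//	// every sample; sm reports whether any key matched, and hm stays false
//	// because hide was nil.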
|
||||
|
||||
// matchesName returns whether the location matches the regular
|
||||
// expression. It checks any available function names, file names, and
|
||||
// mapping object filename.
|
||||
func (loc *Location) matchesName(re *regexp.Regexp) bool {
|
||||
for _, ln := range loc.Line {
|
||||
if fn := ln.Function; fn != nil {
|
||||
if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
}
|
||||
if m := loc.Mapping; m != nil && re.MatchString(m.File) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// unmatchedLines returns the lines in the location that do not match
|
||||
// the regular expression.
|
||||
func (loc *Location) unmatchedLines(re *regexp.Regexp) []Line {
|
||||
if m := loc.Mapping; m != nil && re.MatchString(m.File) {
|
||||
return nil
|
||||
}
|
||||
var lines []Line
|
||||
for _, ln := range loc.Line {
|
||||
if fn := ln.Function; fn != nil {
|
||||
if re.MatchString(fn.Name) || re.MatchString(fn.Filename) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
lines = append(lines, ln)
|
||||
}
|
||||
return lines
|
||||
}
|
||||
|
||||
// matchedLines returns the lines in the location that match
|
||||
// the regular expression.
|
||||
func (loc *Location) matchedLines(re *regexp.Regexp) []Line {
|
||||
if m := loc.Mapping; m != nil && re.MatchString(m.File) {
|
||||
return loc.Line
|
||||
}
|
||||
var lines []Line
|
||||
for _, ln := range loc.Line {
|
||||
if fn := ln.Function; fn != nil {
|
||||
if !re.MatchString(fn.Name) && !re.MatchString(fn.Filename) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
lines = append(lines, ln)
|
||||
}
|
||||
return lines
|
||||
}
|
||||
|
||||
// focusedAndNotIgnored looks up a slice of ids against a map of
|
||||
// focused/ignored locations. The map only contains locations that are
|
||||
// explicitly focused or ignored. Returns whether there is at least
|
||||
// one focused location but no ignored locations.
|
||||
func focusedAndNotIgnored(locs []*Location, m map[uint64]bool) bool {
|
||||
var f bool
|
||||
for _, loc := range locs {
|
||||
if focus, focusOrIgnore := m[loc.ID]; focusOrIgnore {
|
||||
if focus {
|
||||
// Found focused location. Must keep searching in case there
|
||||
// is an ignored one as well.
|
||||
f = true
|
||||
} else {
|
||||
// Found ignored location. Can return false right away.
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
return f
|
||||
}
|
||||
|
||||
// TagMatch selects tags for filtering
|
||||
type TagMatch func(s *Sample) bool
|
||||
|
||||
// FilterSamplesByTag removes all samples from the profile, except
|
||||
// those that match focus and do not match the ignore regular
|
||||
// expression.
|
||||
func (p *Profile) FilterSamplesByTag(focus, ignore TagMatch) (fm, im bool) {
|
||||
samples := make([]*Sample, 0, len(p.Sample))
|
||||
for _, s := range p.Sample {
|
||||
focused, ignored := true, false
|
||||
if focus != nil {
|
||||
focused = focus(s)
|
||||
}
|
||||
if ignore != nil {
|
||||
ignored = ignore(s)
|
||||
}
|
||||
fm = fm || focused
|
||||
im = im || ignored
|
||||
if focused && !ignored {
|
||||
samples = append(samples, s)
|
||||
}
|
||||
}
|
||||
p.Sample = samples
|
||||
return
|
||||
}
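// A sketch of tag-based filtering, assuming a parsed *Profile in p; the
// label key and value are hypothetical:
//
//	focus := func(s *Sample) bool { return s.HasLabel("span", "db") }
//	fm, im := p.FilterSamplesByTag(focus, nil)
//	// Only samples labeled span=db survive; fm reports whether any sample
//	// matched, and im stays false because ignore was nil.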
@@ -0,0 +1,64 @@
// Copyright 2016 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package profile
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// SampleIndexByName returns the appropriate index for a value of sample index.
|
||||
// If numeric, it returns the number, otherwise it looks up the text in the
|
||||
// profile sample types.
|
||||
func (p *Profile) SampleIndexByName(sampleIndex string) (int, error) {
|
||||
if sampleIndex == "" {
|
||||
if dst := p.DefaultSampleType; dst != "" {
|
||||
for i, t := range sampleTypes(p) {
|
||||
if t == dst {
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
}
|
||||
// By default select the last sample value
|
||||
return len(p.SampleType) - 1, nil
|
||||
}
|
||||
if i, err := strconv.Atoi(sampleIndex); err == nil {
|
||||
if i < 0 || i >= len(p.SampleType) {
|
||||
return 0, fmt.Errorf("sample_index %s is outside the range [0..%d]", sampleIndex, len(p.SampleType)-1)
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
// Remove the inuse_ prefix to support legacy pprof options
|
||||
// "inuse_space" and "inuse_objects" for profiles containing types
|
||||
// "space" and "objects".
|
||||
noInuse := strings.TrimPrefix(sampleIndex, "inuse_")
|
||||
for i, t := range p.SampleType {
|
||||
if t.Type == sampleIndex || t.Type == noInuse {
|
||||
return i, nil
|
||||
}
|
||||
}
|
||||
|
||||
return 0, fmt.Errorf("sample_index %q must be one of: %v", sampleIndex, sampleTypes(p))
|
||||
}
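// A sketch, assuming a parsed heap profile in p with sample types
// [alloc_objects alloc_space inuse_objects inuse_space]:
//
//	i, err := p.SampleIndexByName("inuse_space") // i == 3, err == nil
//	j, _ := p.SampleIndexByName("3")             // numeric form of the same index
//	k, _ := p.SampleIndexByName("")              // DefaultSampleType if set, else the last type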
|
||||
|
||||
func sampleTypes(p *Profile) []string {
|
||||
types := make([]string, len(p.SampleType))
|
||||
for i, t := range p.SampleType {
|
||||
types[i] = t.Type
|
||||
}
|
||||
return types
|
||||
}
@@ -0,0 +1,315 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This file implements parsers to convert java legacy profiles into
|
||||
// the profile.proto format.
|
||||
|
||||
package profile
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
attributeRx = regexp.MustCompile(`([\w ]+)=([\w ]+)`)
|
||||
javaSampleRx = regexp.MustCompile(` *(\d+) +(\d+) +@ +([ x0-9a-f]*)`)
|
||||
javaLocationRx = regexp.MustCompile(`^\s*0x([[:xdigit:]]+)\s+(.*)\s*$`)
|
||||
javaLocationFileLineRx = regexp.MustCompile(`^(.*)\s+\((.+):(-?[[:digit:]]+)\)$`)
|
||||
javaLocationPathRx = regexp.MustCompile(`^(.*)\s+\((.*)\)$`)
|
||||
)
|
||||
|
||||
// javaCPUProfile returns a new Profile from profilez data.
|
||||
// b is the profile bytes after the header, period is the profiling
|
||||
// period, and parse is a function to parse 8-byte chunks from the
|
||||
// profile in its native endianness.
|
||||
func javaCPUProfile(b []byte, period int64, parse func(b []byte) (uint64, []byte)) (*Profile, error) {
|
||||
p := &Profile{
|
||||
Period: period * 1000,
|
||||
PeriodType: &ValueType{Type: "cpu", Unit: "nanoseconds"},
|
||||
SampleType: []*ValueType{{Type: "samples", Unit: "count"}, {Type: "cpu", Unit: "nanoseconds"}},
|
||||
}
|
||||
var err error
|
||||
var locs map[uint64]*Location
|
||||
if b, locs, err = parseCPUSamples(b, parse, false, p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = parseJavaLocations(b, locs, p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Strip out addresses for better merge.
|
||||
if err = p.Aggregate(true, true, true, true, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// parseJavaProfile returns a new profile from heapz or contentionz
|
||||
// data. b is the profile bytes after the header.
|
||||
func parseJavaProfile(b []byte) (*Profile, error) {
|
||||
h := bytes.SplitAfterN(b, []byte("\n"), 2)
|
||||
if len(h) < 2 {
|
||||
return nil, errUnrecognized
|
||||
}
|
||||
|
||||
p := &Profile{
|
||||
PeriodType: &ValueType{},
|
||||
}
|
||||
header := string(bytes.TrimSpace(h[0]))
|
||||
|
||||
var err error
|
||||
var pType string
|
||||
switch header {
|
||||
case "--- heapz 1 ---":
|
||||
pType = "heap"
|
||||
case "--- contentionz 1 ---":
|
||||
pType = "contention"
|
||||
default:
|
||||
return nil, errUnrecognized
|
||||
}
|
||||
|
||||
if b, err = parseJavaHeader(pType, h[1], p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var locs map[uint64]*Location
|
||||
if b, locs, err = parseJavaSamples(pType, b, p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if err = parseJavaLocations(b, locs, p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Strip out addresses for better merge.
|
||||
if err = p.Aggregate(true, true, true, true, false); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// parseJavaHeader parses the attribute section on a java profile and
|
||||
// populates a profile. Returns the remainder of the buffer after all
|
||||
// attributes.
|
||||
func parseJavaHeader(pType string, b []byte, p *Profile) ([]byte, error) {
|
||||
nextNewLine := bytes.IndexByte(b, byte('\n'))
|
||||
for nextNewLine != -1 {
|
||||
line := string(bytes.TrimSpace(b[0:nextNewLine]))
|
||||
if line != "" {
|
||||
h := attributeRx.FindStringSubmatch(line)
|
||||
if h == nil {
|
||||
// Not a valid attribute, exit.
|
||||
return b, nil
|
||||
}
|
||||
|
||||
attribute, value := strings.TrimSpace(h[1]), strings.TrimSpace(h[2])
|
||||
var err error
|
||||
switch pType + "/" + attribute {
|
||||
case "heap/format", "cpu/format", "contention/format":
|
||||
if value != "java" {
|
||||
return nil, errUnrecognized
|
||||
}
|
||||
case "heap/resolution":
|
||||
p.SampleType = []*ValueType{
|
||||
{Type: "inuse_objects", Unit: "count"},
|
||||
{Type: "inuse_space", Unit: value},
|
||||
}
|
||||
case "contention/resolution":
|
||||
p.SampleType = []*ValueType{
|
||||
{Type: "contentions", Unit: "count"},
|
||||
{Type: "delay", Unit: value},
|
||||
}
|
||||
case "contention/sampling period":
|
||||
p.PeriodType = &ValueType{
|
||||
Type: "contentions", Unit: "count",
|
||||
}
|
||||
if p.Period, err = strconv.ParseInt(value, 0, 64); err != nil {
|
||||
return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
|
||||
}
|
||||
case "contention/ms since reset":
|
||||
millis, err := strconv.ParseInt(value, 0, 64)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse attribute %s: %v", line, err)
|
||||
}
|
||||
p.DurationNanos = millis * 1000 * 1000
|
||||
default:
|
||||
return nil, errUnrecognized
|
||||
}
|
||||
}
|
||||
// Grab next line.
|
||||
b = b[nextNewLine+1:]
|
||||
nextNewLine = bytes.IndexByte(b, byte('\n'))
|
||||
}
|
||||
return b, nil
|
||||
}
|
||||
|
||||
// parseJavaSamples parses the samples from a java profile and
|
||||
// populates the Samples in a profile. Returns the remainder of the
|
||||
// buffer after the samples.
|
||||
func parseJavaSamples(pType string, b []byte, p *Profile) ([]byte, map[uint64]*Location, error) {
|
||||
nextNewLine := bytes.IndexByte(b, byte('\n'))
|
||||
locs := make(map[uint64]*Location)
|
||||
for nextNewLine != -1 {
|
||||
line := string(bytes.TrimSpace(b[0:nextNewLine]))
|
||||
if line != "" {
|
||||
sample := javaSampleRx.FindStringSubmatch(line)
|
||||
if sample == nil {
|
||||
// Not a valid sample, exit.
|
||||
return b, locs, nil
|
||||
}
|
||||
|
||||
// Java profiles have data/fields inverted compared to other
|
||||
// profile types.
|
||||
var err error
|
||||
value1, value2, value3 := sample[2], sample[1], sample[3]
|
||||
addrs, err := parseHexAddresses(value3)
|
||||
if err != nil {
|
||||
return nil, nil, fmt.Errorf("malformed sample: %s: %v", line, err)
|
||||
}
|
||||
|
||||
var sloc []*Location
|
||||
for _, addr := range addrs {
|
||||
loc := locs[addr]
|
||||
if locs[addr] == nil {
|
||||
loc = &Location{
|
||||
Address: addr,
|
||||
}
|
||||
p.Location = append(p.Location, loc)
|
||||
locs[addr] = loc
|
||||
}
|
||||
sloc = append(sloc, loc)
|
||||
}
|
||||
s := &Sample{
|
||||
Value: make([]int64, 2),
|
||||
Location: sloc,
|
||||
}
|
||||
|
||||
if s.Value[0], err = strconv.ParseInt(value1, 0, 64); err != nil {
|
||||
return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
|
||||
}
|
||||
if s.Value[1], err = strconv.ParseInt(value2, 0, 64); err != nil {
|
||||
return nil, nil, fmt.Errorf("parsing sample %s: %v", line, err)
|
||||
}
|
||||
|
||||
switch pType {
|
||||
case "heap":
|
||||
const javaHeapzSamplingRate = 524288 // 512K
|
||||
if s.Value[0] == 0 {
|
||||
return nil, nil, fmt.Errorf("parsing sample %s: second value must be non-zero", line)
|
||||
}
|
||||
s.NumLabel = map[string][]int64{"bytes": {s.Value[1] / s.Value[0]}}
|
||||
s.Value[0], s.Value[1] = scaleHeapSample(s.Value[0], s.Value[1], javaHeapzSamplingRate)
|
||||
case "contention":
|
||||
if period := p.Period; period != 0 {
|
||||
s.Value[0] = s.Value[0] * p.Period
|
||||
s.Value[1] = s.Value[1] * p.Period
|
||||
}
|
||||
}
|
||||
p.Sample = append(p.Sample, s)
|
||||
}
|
||||
// Grab next line.
|
||||
b = b[nextNewLine+1:]
|
||||
nextNewLine = bytes.IndexByte(b, byte('\n'))
|
||||
}
|
||||
return b, locs, nil
|
||||
}
|
||||
|
||||
// parseJavaLocations parses the location information in a java
// profile and populates the Locations in a profile. It uses the
// location addresses from the profile as the IDs of the locations.
|
||||
func parseJavaLocations(b []byte, locs map[uint64]*Location, p *Profile) error {
|
||||
r := bytes.NewBuffer(b)
|
||||
fns := make(map[string]*Function)
|
||||
for {
|
||||
line, err := r.ReadString('\n')
|
||||
if err != nil {
|
||||
if err != io.EOF {
|
||||
return err
|
||||
}
|
||||
if line == "" {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if line = strings.TrimSpace(line); line == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
jloc := javaLocationRx.FindStringSubmatch(line)
|
||||
if len(jloc) != 3 {
|
||||
continue
|
||||
}
|
||||
addr, err := strconv.ParseUint(jloc[1], 16, 64)
|
||||
if err != nil {
|
||||
return fmt.Errorf("parsing sample %s: %v", line, err)
|
||||
}
|
||||
loc := locs[addr]
|
||||
if loc == nil {
|
||||
// Unused/unseen
|
||||
continue
|
||||
}
|
||||
var lineFunc, lineFile string
|
||||
var lineNo int64
|
||||
|
||||
if fileLine := javaLocationFileLineRx.FindStringSubmatch(jloc[2]); len(fileLine) == 4 {
|
||||
// Found a line of the form: "function (file:line)"
|
||||
lineFunc, lineFile = fileLine[1], fileLine[2]
|
||||
if n, err := strconv.ParseInt(fileLine[3], 10, 64); err == nil && n > 0 {
|
||||
lineNo = n
|
||||
}
|
||||
} else if filePath := javaLocationPathRx.FindStringSubmatch(jloc[2]); len(filePath) == 3 {
|
||||
// If there's not a file:line, it's a shared library path.
|
||||
// The path isn't interesting, so just give the .so.
|
||||
lineFunc, lineFile = filePath[1], filepath.Base(filePath[2])
|
||||
} else if strings.Contains(jloc[2], "generated stub/JIT") {
|
||||
lineFunc = "STUB"
|
||||
} else {
|
||||
// Treat whole line as the function name. This is used by the
|
||||
// java agent for internal states such as "GC" or "VM".
|
||||
lineFunc = jloc[2]
|
||||
}
|
||||
fn := fns[lineFunc]
|
||||
|
||||
if fn == nil {
|
||||
fn = &Function{
|
||||
Name: lineFunc,
|
||||
SystemName: lineFunc,
|
||||
Filename: lineFile,
|
||||
}
|
||||
fns[lineFunc] = fn
|
||||
p.Function = append(p.Function, fn)
|
||||
}
|
||||
loc.Line = []Line{
|
||||
{
|
||||
Function: fn,
|
||||
Line: lineNo,
|
||||
},
|
||||
}
|
||||
loc.Address = 0
|
||||
}
|
||||
|
||||
p.remapLocationIDs()
|
||||
p.remapFunctionIDs()
|
||||
p.remapMappingIDs()
|
||||
|
||||
return nil
|
||||
}
|
File diff suppressed because it is too large
@@ -0,0 +1,481 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package profile
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Compact performs garbage collection on a profile to remove any
|
||||
// unreferenced fields. This is useful to reduce the size of a profile
|
||||
// after samples or locations have been removed.
|
||||
func (p *Profile) Compact() *Profile {
|
||||
p, _ = Merge([]*Profile{p})
|
||||
return p
|
||||
}
|
||||
|
||||
// Merge merges all the profiles in profs into a single Profile.
|
||||
// Returns a new profile independent of the input profiles. The merged
|
||||
// profile is compacted to eliminate unused samples, locations,
|
||||
// functions and mappings. Profiles must have identical profile sample
|
||||
// and period types or the merge will fail. profile.Period of the
|
||||
// resulting profile will be the maximum of all profiles, and
|
||||
// profile.TimeNanos will be the earliest nonzero one. Merges are
|
||||
// associative with the caveat of the first profile having some
|
||||
// specialization in how headers are combined. There may be other
|
||||
// subtleties now or in the future regarding associativity.
|
||||
func Merge(srcs []*Profile) (*Profile, error) {
|
||||
if len(srcs) == 0 {
|
||||
return nil, fmt.Errorf("no profiles to merge")
|
||||
}
|
||||
p, err := combineHeaders(srcs)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
pm := &profileMerger{
|
||||
p: p,
|
||||
samples: make(map[sampleKey]*Sample, len(srcs[0].Sample)),
|
||||
locations: make(map[locationKey]*Location, len(srcs[0].Location)),
|
||||
functions: make(map[functionKey]*Function, len(srcs[0].Function)),
|
||||
mappings: make(map[mappingKey]*Mapping, len(srcs[0].Mapping)),
|
||||
}
|
||||
|
||||
for _, src := range srcs {
|
||||
// Clear the profile-specific hash tables
|
||||
pm.locationsByID = make(map[uint64]*Location, len(src.Location))
|
||||
pm.functionsByID = make(map[uint64]*Function, len(src.Function))
|
||||
pm.mappingsByID = make(map[uint64]mapInfo, len(src.Mapping))
|
||||
|
||||
if len(pm.mappings) == 0 && len(src.Mapping) > 0 {
|
||||
// The Mapping list has the property that the first mapping
|
||||
// represents the main binary. Take the first Mapping we see,
|
||||
// otherwise the operations below will add mappings in an
|
||||
// arbitrary order.
|
||||
pm.mapMapping(src.Mapping[0])
|
||||
}
|
||||
|
||||
for _, s := range src.Sample {
|
||||
if !isZeroSample(s) {
|
||||
pm.mapSample(s)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, s := range p.Sample {
|
||||
if isZeroSample(s) {
|
||||
// If there are any zero samples, re-merge the profile to GC
|
||||
// them.
|
||||
return Merge([]*Profile{p})
|
||||
}
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
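// A sketch of merging two compatible profiles p1 and p2 (hypothetical,
// already parsed):
//
//	merged, err := Merge([]*Profile{p1, p2})
//	if err != nil {
//		// the profiles had mismatched sample or period types
//	}
//	// merged.Period is the larger of the two periods, merged.TimeNanos the
//	// earliest nonzero timestamp, DurationNanos the sum, and samples with
//	// identical locations and labels have their values added together.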
|
||||
|
||||
// Normalize normalizes the source profile by multiplying each value in profile by the
|
||||
// ratio of the sum of the base profile's values of that sample type to the sum of the
|
||||
// source profile's value of that sample type.
|
||||
func (p *Profile) Normalize(pb *Profile) error {
|
||||
|
||||
if err := p.compatible(pb); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
baseVals := make([]int64, len(p.SampleType))
|
||||
for _, s := range pb.Sample {
|
||||
for i, v := range s.Value {
|
||||
baseVals[i] += v
|
||||
}
|
||||
}
|
||||
|
||||
srcVals := make([]int64, len(p.SampleType))
|
||||
for _, s := range p.Sample {
|
||||
for i, v := range s.Value {
|
||||
srcVals[i] += v
|
||||
}
|
||||
}
|
||||
|
||||
normScale := make([]float64, len(baseVals))
|
||||
for i := range baseVals {
|
||||
if srcVals[i] == 0 {
|
||||
normScale[i] = 0.0
|
||||
} else {
|
||||
normScale[i] = float64(baseVals[i]) / float64(srcVals[i])
|
||||
}
|
||||
}
|
||||
p.ScaleN(normScale)
|
||||
return nil
|
||||
}
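// A sketch with concrete numbers, assuming parsed profiles p and base that
// share the same sample types:
//
//	// If base's values sum to 200 and p's sum to 50, every value in p is
//	// multiplied by 200/50 = 4, so the two profiles total the same amount.
//	if err := p.Normalize(base); err != nil {
//		// p and base have incompatible sample or period types
//	}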
|
||||
|
||||
func isZeroSample(s *Sample) bool {
|
||||
for _, v := range s.Value {
|
||||
if v != 0 {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
type profileMerger struct {
|
||||
p *Profile
|
||||
|
||||
// Memoization tables within a profile.
|
||||
locationsByID map[uint64]*Location
|
||||
functionsByID map[uint64]*Function
|
||||
mappingsByID map[uint64]mapInfo
|
||||
|
||||
// Memoization tables for profile entities.
|
||||
samples map[sampleKey]*Sample
|
||||
locations map[locationKey]*Location
|
||||
functions map[functionKey]*Function
|
||||
mappings map[mappingKey]*Mapping
|
||||
}
|
||||
|
||||
type mapInfo struct {
|
||||
m *Mapping
|
||||
offset int64
|
||||
}
|
||||
|
||||
func (pm *profileMerger) mapSample(src *Sample) *Sample {
|
||||
s := &Sample{
|
||||
Location: make([]*Location, len(src.Location)),
|
||||
Value: make([]int64, len(src.Value)),
|
||||
Label: make(map[string][]string, len(src.Label)),
|
||||
NumLabel: make(map[string][]int64, len(src.NumLabel)),
|
||||
NumUnit: make(map[string][]string, len(src.NumLabel)),
|
||||
}
|
||||
for i, l := range src.Location {
|
||||
s.Location[i] = pm.mapLocation(l)
|
||||
}
|
||||
for k, v := range src.Label {
|
||||
vv := make([]string, len(v))
|
||||
copy(vv, v)
|
||||
s.Label[k] = vv
|
||||
}
|
||||
for k, v := range src.NumLabel {
|
||||
u := src.NumUnit[k]
|
||||
vv := make([]int64, len(v))
|
||||
uu := make([]string, len(u))
|
||||
copy(vv, v)
|
||||
copy(uu, u)
|
||||
s.NumLabel[k] = vv
|
||||
s.NumUnit[k] = uu
|
||||
}
|
||||
// Check memoization table. Must be done on the remapped location to
|
||||
// account for the remapped mapping. Add current values to the
|
||||
// existing sample.
|
||||
k := s.key()
|
||||
if ss, ok := pm.samples[k]; ok {
|
||||
for i, v := range src.Value {
|
||||
ss.Value[i] += v
|
||||
}
|
||||
return ss
|
||||
}
|
||||
copy(s.Value, src.Value)
|
||||
pm.samples[k] = s
|
||||
pm.p.Sample = append(pm.p.Sample, s)
|
||||
return s
|
||||
}
|
||||
|
||||
// key generates sampleKey to be used as a key for maps.
|
||||
func (sample *Sample) key() sampleKey {
|
||||
ids := make([]string, len(sample.Location))
|
||||
for i, l := range sample.Location {
|
||||
ids[i] = strconv.FormatUint(l.ID, 16)
|
||||
}
|
||||
|
||||
labels := make([]string, 0, len(sample.Label))
|
||||
for k, v := range sample.Label {
|
||||
labels = append(labels, fmt.Sprintf("%q%q", k, v))
|
||||
}
|
||||
sort.Strings(labels)
|
||||
|
||||
numlabels := make([]string, 0, len(sample.NumLabel))
|
||||
for k, v := range sample.NumLabel {
|
||||
numlabels = append(numlabels, fmt.Sprintf("%q%x%x", k, v, sample.NumUnit[k]))
|
||||
}
|
||||
sort.Strings(numlabels)
|
||||
|
||||
return sampleKey{
|
||||
strings.Join(ids, "|"),
|
||||
strings.Join(labels, ""),
|
||||
strings.Join(numlabels, ""),
|
||||
}
|
||||
}
|
||||
|
||||
type sampleKey struct {
|
||||
locations string
|
||||
labels string
|
||||
numlabels string
|
||||
}
|
||||
|
||||
func (pm *profileMerger) mapLocation(src *Location) *Location {
|
||||
if src == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
if l, ok := pm.locationsByID[src.ID]; ok {
|
||||
return l
|
||||
}
|
||||
|
||||
mi := pm.mapMapping(src.Mapping)
|
||||
l := &Location{
|
||||
ID: uint64(len(pm.p.Location) + 1),
|
||||
Mapping: mi.m,
|
||||
Address: uint64(int64(src.Address) + mi.offset),
|
||||
Line: make([]Line, len(src.Line)),
|
||||
IsFolded: src.IsFolded,
|
||||
}
|
||||
for i, ln := range src.Line {
|
||||
l.Line[i] = pm.mapLine(ln)
|
||||
}
|
||||
// Check memoization table. Must be done on the remapped location to
|
||||
// account for the remapped mapping ID.
|
||||
k := l.key()
|
||||
if ll, ok := pm.locations[k]; ok {
|
||||
pm.locationsByID[src.ID] = ll
|
||||
return ll
|
||||
}
|
||||
pm.locationsByID[src.ID] = l
|
||||
pm.locations[k] = l
|
||||
pm.p.Location = append(pm.p.Location, l)
|
||||
return l
|
||||
}
|
||||
|
||||
// key generates locationKey to be used as a key for maps.
|
||||
func (l *Location) key() locationKey {
|
||||
key := locationKey{
|
||||
addr: l.Address,
|
||||
isFolded: l.IsFolded,
|
||||
}
|
||||
if l.Mapping != nil {
|
||||
// Normalizes address to handle address space randomization.
|
||||
key.addr -= l.Mapping.Start
|
||||
key.mappingID = l.Mapping.ID
|
||||
}
|
||||
lines := make([]string, len(l.Line)*2)
|
||||
for i, line := range l.Line {
|
||||
if line.Function != nil {
|
||||
lines[i*2] = strconv.FormatUint(line.Function.ID, 16)
|
||||
}
|
||||
lines[i*2+1] = strconv.FormatInt(line.Line, 16)
|
||||
}
|
||||
key.lines = strings.Join(lines, "|")
|
||||
return key
|
||||
}
|
||||
|
||||
type locationKey struct {
|
||||
addr, mappingID uint64
|
||||
lines string
|
||||
isFolded bool
|
||||
}
|
||||
|
||||
func (pm *profileMerger) mapMapping(src *Mapping) mapInfo {
|
||||
if src == nil {
|
||||
return mapInfo{}
|
||||
}
|
||||
|
||||
if mi, ok := pm.mappingsByID[src.ID]; ok {
|
||||
return mi
|
||||
}
|
||||
|
||||
// Check memoization tables.
|
||||
mk := src.key()
|
||||
if m, ok := pm.mappings[mk]; ok {
|
||||
mi := mapInfo{m, int64(m.Start) - int64(src.Start)}
|
||||
pm.mappingsByID[src.ID] = mi
|
||||
return mi
|
||||
}
|
||||
m := &Mapping{
|
||||
ID: uint64(len(pm.p.Mapping) + 1),
|
||||
Start: src.Start,
|
||||
Limit: src.Limit,
|
||||
Offset: src.Offset,
|
||||
File: src.File,
|
||||
BuildID: src.BuildID,
|
||||
HasFunctions: src.HasFunctions,
|
||||
HasFilenames: src.HasFilenames,
|
||||
HasLineNumbers: src.HasLineNumbers,
|
||||
HasInlineFrames: src.HasInlineFrames,
|
||||
}
|
||||
pm.p.Mapping = append(pm.p.Mapping, m)
|
||||
|
||||
// Update memoization tables.
|
||||
pm.mappings[mk] = m
|
||||
mi := mapInfo{m, 0}
|
||||
pm.mappingsByID[src.ID] = mi
|
||||
return mi
|
||||
}
|
||||
|
||||
// key generates encoded strings of Mapping to be used as a key for
|
||||
// maps.
|
||||
func (m *Mapping) key() mappingKey {
|
||||
// Normalize addresses to handle address space randomization.
|
||||
// Round up to next 4K boundary to avoid minor discrepancies.
|
||||
const mapsizeRounding = 0x1000
|
||||
|
||||
size := m.Limit - m.Start
|
||||
size = size + mapsizeRounding - 1
|
||||
size = size - (size % mapsizeRounding)
|
||||
key := mappingKey{
|
||||
size: size,
|
||||
offset: m.Offset,
|
||||
}
|
||||
|
||||
switch {
|
||||
case m.BuildID != "":
|
||||
key.buildIDOrFile = m.BuildID
|
||||
case m.File != "":
|
||||
key.buildIDOrFile = m.File
|
||||
default:
|
||||
// A mapping containing neither build ID nor file name is a fake mapping. A
|
||||
// key with empty buildIDOrFile is used for fake mappings so that they are
|
||||
// treated as the same mapping during merging.
|
||||
}
|
||||
return key
|
||||
}
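// Worked example of the rounding above: a mapping spanning 0x2345 bytes gets
// size = (0x2345 + 0xFFF) - ((0x2345 + 0xFFF) % 0x1000) = 0x3000, so the same
// library mapped at different base addresses still yields an identical key.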
|
||||
|
||||
type mappingKey struct {
|
||||
size, offset uint64
|
||||
buildIDOrFile string
|
||||
}
|
||||
|
||||
func (pm *profileMerger) mapLine(src Line) Line {
|
||||
ln := Line{
|
||||
Function: pm.mapFunction(src.Function),
|
||||
Line: src.Line,
|
||||
}
|
||||
return ln
|
||||
}
|
||||
|
||||
func (pm *profileMerger) mapFunction(src *Function) *Function {
|
||||
if src == nil {
|
||||
return nil
|
||||
}
|
||||
if f, ok := pm.functionsByID[src.ID]; ok {
|
||||
return f
|
||||
}
|
||||
k := src.key()
|
||||
if f, ok := pm.functions[k]; ok {
|
||||
pm.functionsByID[src.ID] = f
|
||||
return f
|
||||
}
|
||||
f := &Function{
|
||||
ID: uint64(len(pm.p.Function) + 1),
|
||||
Name: src.Name,
|
||||
SystemName: src.SystemName,
|
||||
Filename: src.Filename,
|
||||
StartLine: src.StartLine,
|
||||
}
|
||||
pm.functions[k] = f
|
||||
pm.functionsByID[src.ID] = f
|
||||
pm.p.Function = append(pm.p.Function, f)
|
||||
return f
|
||||
}
|
||||
|
||||
// key generates a struct to be used as a key for maps.
|
||||
func (f *Function) key() functionKey {
|
||||
return functionKey{
|
||||
f.StartLine,
|
||||
f.Name,
|
||||
f.SystemName,
|
||||
f.Filename,
|
||||
}
|
||||
}
|
||||
|
||||
type functionKey struct {
|
||||
startLine int64
|
||||
name, systemName, fileName string
|
||||
}
|
||||
|
||||
// combineHeaders checks that all profiles can be merged and returns
|
||||
// their combined profile.
|
||||
func combineHeaders(srcs []*Profile) (*Profile, error) {
|
||||
for _, s := range srcs[1:] {
|
||||
if err := srcs[0].compatible(s); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
var timeNanos, durationNanos, period int64
|
||||
var comments []string
|
||||
seenComments := map[string]bool{}
|
||||
var defaultSampleType string
|
||||
for _, s := range srcs {
|
||||
if timeNanos == 0 || s.TimeNanos < timeNanos {
|
||||
timeNanos = s.TimeNanos
|
||||
}
|
||||
durationNanos += s.DurationNanos
|
||||
if period == 0 || period < s.Period {
|
||||
period = s.Period
|
||||
}
|
||||
for _, c := range s.Comments {
|
||||
if seen := seenComments[c]; !seen {
|
||||
comments = append(comments, c)
|
||||
seenComments[c] = true
|
||||
}
|
||||
}
|
||||
if defaultSampleType == "" {
|
||||
defaultSampleType = s.DefaultSampleType
|
||||
}
|
||||
}
|
||||
|
||||
p := &Profile{
|
||||
SampleType: make([]*ValueType, len(srcs[0].SampleType)),
|
||||
|
||||
DropFrames: srcs[0].DropFrames,
|
||||
KeepFrames: srcs[0].KeepFrames,
|
||||
|
||||
TimeNanos: timeNanos,
|
||||
DurationNanos: durationNanos,
|
||||
PeriodType: srcs[0].PeriodType,
|
||||
Period: period,
|
||||
|
||||
Comments: comments,
|
||||
DefaultSampleType: defaultSampleType,
|
||||
}
|
||||
copy(p.SampleType, srcs[0].SampleType)
|
||||
return p, nil
|
||||
}
|
||||
|
||||
// compatible determines if two profiles can be compared/merged.
|
||||
// It returns nil if the profiles are compatible; otherwise an error with
|
||||
// details on the incompatibility.
|
||||
func (p *Profile) compatible(pb *Profile) error {
|
||||
if !equalValueType(p.PeriodType, pb.PeriodType) {
|
||||
return fmt.Errorf("incompatible period types %v and %v", p.PeriodType, pb.PeriodType)
|
||||
}
|
||||
|
||||
if len(p.SampleType) != len(pb.SampleType) {
|
||||
return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
|
||||
}
|
||||
|
||||
for i := range p.SampleType {
|
||||
if !equalValueType(p.SampleType[i], pb.SampleType[i]) {
|
||||
return fmt.Errorf("incompatible sample types %v and %v", p.SampleType, pb.SampleType)
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// equalValueType returns true if the two value types are semantically
|
||||
// equal. It ignores the internal fields used during encode/decode.
|
||||
func equalValueType(st1, st2 *ValueType) bool {
|
||||
return st1.Type == st2.Type && st1.Unit == st2.Unit
|
||||
}
|
@@ -0,0 +1,805 @@
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package profile provides a representation of profile.proto and
|
||||
// methods to encode/decode profiles in this format.
|
||||
package profile
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"compress/gzip"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Profile is an in-memory representation of profile.proto.
|
||||
type Profile struct {
|
||||
SampleType []*ValueType
|
||||
DefaultSampleType string
|
||||
Sample []*Sample
|
||||
Mapping []*Mapping
|
||||
Location []*Location
|
||||
Function []*Function
|
||||
Comments []string
|
||||
|
||||
DropFrames string
|
||||
KeepFrames string
|
||||
|
||||
TimeNanos int64
|
||||
DurationNanos int64
|
||||
PeriodType *ValueType
|
||||
Period int64
|
||||
|
||||
// The following fields are modified during encoding and copying,
|
||||
// so are protected by a Mutex.
|
||||
encodeMu sync.Mutex
|
||||
|
||||
commentX []int64
|
||||
dropFramesX int64
|
||||
keepFramesX int64
|
||||
stringTable []string
|
||||
defaultSampleTypeX int64
|
||||
}
|
||||
|
||||
// ValueType corresponds to Profile.ValueType
|
||||
type ValueType struct {
|
||||
Type string // cpu, wall, inuse_space, etc
|
||||
Unit string // seconds, nanoseconds, bytes, etc
|
||||
|
||||
typeX int64
|
||||
unitX int64
|
||||
}
|
||||
|
||||
// Sample corresponds to Profile.Sample
|
||||
type Sample struct {
|
||||
Location []*Location
|
||||
Value []int64
|
||||
Label map[string][]string
|
||||
NumLabel map[string][]int64
|
||||
NumUnit map[string][]string
|
||||
|
||||
locationIDX []uint64
|
||||
labelX []label
|
||||
}
|
||||
|
||||
// label corresponds to Profile.Label
|
||||
type label struct {
|
||||
keyX int64
|
||||
// Exactly one of the two following values must be set
|
||||
strX int64
|
||||
numX int64 // Integer value for this label
|
||||
// can be set if numX has value
|
||||
unitX int64
|
||||
}
|
||||
|
||||
// Mapping corresponds to Profile.Mapping
|
||||
type Mapping struct {
|
||||
ID uint64
|
||||
Start uint64
|
||||
Limit uint64
|
||||
Offset uint64
|
||||
File string
|
||||
BuildID string
|
||||
HasFunctions bool
|
||||
HasFilenames bool
|
||||
HasLineNumbers bool
|
||||
HasInlineFrames bool
|
||||
|
||||
fileX int64
|
||||
buildIDX int64
|
||||
}
|
||||
|
||||
// Location corresponds to Profile.Location
|
||||
type Location struct {
|
||||
ID uint64
|
||||
Mapping *Mapping
|
||||
Address uint64
|
||||
Line []Line
|
||||
IsFolded bool
|
||||
|
||||
mappingIDX uint64
|
||||
}
|
||||
|
||||
// Line corresponds to Profile.Line
|
||||
type Line struct {
|
||||
Function *Function
|
||||
Line int64
|
||||
|
||||
functionIDX uint64
|
||||
}
|
||||
|
||||
// Function corresponds to Profile.Function
|
||||
type Function struct {
|
||||
ID uint64
|
||||
Name string
|
||||
SystemName string
|
||||
Filename string
|
||||
StartLine int64
|
||||
|
||||
nameX int64
|
||||
systemNameX int64
|
||||
filenameX int64
|
||||
}
|
||||
|
||||
// Parse parses a profile and checks for its validity. The input
|
||||
// may be a gzip-compressed encoded protobuf or one of many legacy
|
||||
// profile formats which may be unsupported in the future.
|
||||
func Parse(r io.Reader) (*Profile, error) {
|
||||
data, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ParseData(data)
|
||||
}
|
||||
|
||||
// ParseData parses a profile from a buffer and checks for its
|
||||
// validity.
|
||||
func ParseData(data []byte) (*Profile, error) {
|
||||
var p *Profile
|
||||
var err error
|
||||
if len(data) >= 2 && data[0] == 0x1f && data[1] == 0x8b {
|
||||
gz, err := gzip.NewReader(bytes.NewBuffer(data))
|
||||
if err == nil {
|
||||
data, err = ioutil.ReadAll(gz)
|
||||
}
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("decompressing profile: %v", err)
|
||||
}
|
||||
}
|
||||
if p, err = ParseUncompressed(data); err != nil && err != errNoData && err != errConcatProfile {
|
||||
p, err = parseLegacy(data)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("parsing profile: %v", err)
|
||||
}
|
||||
|
||||
if err := p.CheckValid(); err != nil {
|
||||
return nil, fmt.Errorf("malformed profile: %v", err)
|
||||
}
|
||||
return p, nil
|
||||
}
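// A sketch of loading a profile from disk; the path is hypothetical and the
// "os" import is assumed:
//
//	func load(path string) (*Profile, error) {
//		f, err := os.Open(path) // e.g. a gzip-compressed cpu.pb.gz
//		if err != nil {
//			return nil, err
//		}
//		defer f.Close()
//		return Parse(f) // handles gzip, raw protobuf and the legacy text formats
//	}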
|
||||
|
||||
var errUnrecognized = fmt.Errorf("unrecognized profile format")
|
||||
var errMalformed = fmt.Errorf("malformed profile format")
|
||||
var errNoData = fmt.Errorf("empty input file")
|
||||
var errConcatProfile = fmt.Errorf("concatenated profiles detected")
|
||||
|
||||
func parseLegacy(data []byte) (*Profile, error) {
|
||||
parsers := []func([]byte) (*Profile, error){
|
||||
parseCPU,
|
||||
parseHeap,
|
||||
parseGoCount, // goroutine, threadcreate
|
||||
parseThread,
|
||||
parseContention,
|
||||
parseJavaProfile,
|
||||
}
|
||||
|
||||
for _, parser := range parsers {
|
||||
p, err := parser(data)
|
||||
if err == nil {
|
||||
p.addLegacyFrameInfo()
|
||||
return p, nil
|
||||
}
|
||||
if err != errUnrecognized {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return nil, errUnrecognized
|
||||
}
|
||||
|
||||
// ParseUncompressed parses an uncompressed protobuf into a profile.
|
||||
func ParseUncompressed(data []byte) (*Profile, error) {
|
||||
if len(data) == 0 {
|
||||
return nil, errNoData
|
||||
}
|
||||
p := &Profile{}
|
||||
if err := unmarshal(data, p); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := p.postDecode(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return p, nil
|
||||
}
|
||||
|
||||
var libRx = regexp.MustCompile(`([.]so$|[.]so[._][0-9]+)`)
|
||||
|
||||
// massageMappings applies heuristic-based changes to the profile
|
||||
// mappings to account for quirks of some environments.
|
||||
func (p *Profile) massageMappings() {
|
||||
// Merge adjacent regions with matching names, checking that the offsets match
|
||||
if len(p.Mapping) > 1 {
|
||||
mappings := []*Mapping{p.Mapping[0]}
|
||||
for _, m := range p.Mapping[1:] {
|
||||
lm := mappings[len(mappings)-1]
|
||||
if adjacent(lm, m) {
|
||||
lm.Limit = m.Limit
|
||||
if m.File != "" {
|
||||
lm.File = m.File
|
||||
}
|
||||
if m.BuildID != "" {
|
||||
lm.BuildID = m.BuildID
|
||||
}
|
||||
p.updateLocationMapping(m, lm)
|
||||
continue
|
||||
}
|
||||
mappings = append(mappings, m)
|
||||
}
|
||||
p.Mapping = mappings
|
||||
}
|
||||
|
||||
// Use heuristics to identify main binary and move it to the top of the list of mappings
|
||||
for i, m := range p.Mapping {
|
||||
file := strings.TrimSpace(strings.Replace(m.File, "(deleted)", "", -1))
|
||||
if len(file) == 0 {
|
||||
continue
|
||||
}
|
||||
if len(libRx.FindStringSubmatch(file)) > 0 {
|
||||
continue
|
||||
}
|
||||
if file[0] == '[' {
|
||||
continue
|
||||
}
|
||||
// Swap what we guess is main to position 0.
|
||||
p.Mapping[0], p.Mapping[i] = p.Mapping[i], p.Mapping[0]
|
||||
break
|
||||
}
|
||||
|
||||
// Keep the mapping IDs neatly sorted
|
||||
for i, m := range p.Mapping {
|
||||
m.ID = uint64(i + 1)
|
||||
}
|
||||
}
|
||||
|
||||
// adjacent returns whether two mapping entries represent the same
// mapping that has been split into two. It checks that their address
// ranges are adjacent and that their offsets match, when available.
|
||||
func adjacent(m1, m2 *Mapping) bool {
|
||||
if m1.File != "" && m2.File != "" {
|
||||
if m1.File != m2.File {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if m1.BuildID != "" && m2.BuildID != "" {
|
||||
if m1.BuildID != m2.BuildID {
|
||||
return false
|
||||
}
|
||||
}
|
||||
if m1.Limit != m2.Start {
|
||||
return false
|
||||
}
|
||||
if m1.Offset != 0 && m2.Offset != 0 {
|
||||
offset := m1.Offset + (m1.Limit - m1.Start)
|
||||
if offset != m2.Offset {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (p *Profile) updateLocationMapping(from, to *Mapping) {
|
||||
for _, l := range p.Location {
|
||||
if l.Mapping == from {
|
||||
l.Mapping = to
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func serialize(p *Profile) []byte {
|
||||
p.encodeMu.Lock()
|
||||
p.preEncode()
|
||||
b := marshal(p)
|
||||
p.encodeMu.Unlock()
|
||||
return b
|
||||
}
|
||||
|
||||
// Write writes the profile as a gzip-compressed marshaled protobuf.
|
||||
func (p *Profile) Write(w io.Writer) error {
|
||||
zw := gzip.NewWriter(w)
|
||||
defer zw.Close()
|
||||
_, err := zw.Write(serialize(p))
|
||||
return err
|
||||
}
|
||||
|
||||
// WriteUncompressed writes the profile as a marshaled protobuf.
|
||||
func (p *Profile) WriteUncompressed(w io.Writer) error {
|
||||
_, err := w.Write(serialize(p))
|
||||
return err
|
||||
}
|
||||
|
||||
// CheckValid tests whether the profile is valid. Checks include, but are
|
||||
// not limited to:
|
||||
// - len(Profile.Sample[n].value) == len(Profile.value_unit)
|
||||
// - Sample.id has a corresponding Profile.Location
|
||||
func (p *Profile) CheckValid() error {
|
||||
// Check that sample values are consistent
|
||||
sampleLen := len(p.SampleType)
|
||||
if sampleLen == 0 && len(p.Sample) != 0 {
|
||||
return fmt.Errorf("missing sample type information")
|
||||
}
|
||||
for _, s := range p.Sample {
|
||||
if s == nil {
|
||||
return fmt.Errorf("profile has nil sample")
|
||||
}
|
||||
if len(s.Value) != sampleLen {
|
||||
return fmt.Errorf("mismatch: sample has %d values vs. %d types", len(s.Value), len(p.SampleType))
|
||||
}
|
||||
for _, l := range s.Location {
|
||||
if l == nil {
|
||||
return fmt.Errorf("sample has nil location")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Check that all mappings/locations/functions are in the tables
|
||||
// Check that there are no duplicate ids
|
||||
mappings := make(map[uint64]*Mapping, len(p.Mapping))
|
||||
for _, m := range p.Mapping {
|
||||
if m == nil {
|
||||
return fmt.Errorf("profile has nil mapping")
|
||||
}
|
||||
if m.ID == 0 {
|
||||
return fmt.Errorf("found mapping with reserved ID=0")
|
||||
}
|
||||
if mappings[m.ID] != nil {
|
||||
return fmt.Errorf("multiple mappings with same id: %d", m.ID)
|
||||
}
|
||||
mappings[m.ID] = m
|
||||
}
|
||||
functions := make(map[uint64]*Function, len(p.Function))
|
||||
for _, f := range p.Function {
|
||||
if f == nil {
|
||||
return fmt.Errorf("profile has nil function")
|
||||
}
|
||||
if f.ID == 0 {
|
||||
return fmt.Errorf("found function with reserved ID=0")
|
||||
}
|
||||
if functions[f.ID] != nil {
|
||||
return fmt.Errorf("multiple functions with same id: %d", f.ID)
|
||||
}
|
||||
functions[f.ID] = f
|
||||
}
|
||||
locations := make(map[uint64]*Location, len(p.Location))
|
||||
for _, l := range p.Location {
|
||||
if l == nil {
|
||||
return fmt.Errorf("profile has nil location")
|
||||
}
|
||||
if l.ID == 0 {
|
||||
return fmt.Errorf("found location with reserved id=0")
|
||||
}
|
||||
if locations[l.ID] != nil {
|
||||
return fmt.Errorf("multiple locations with same id: %d", l.ID)
|
||||
}
|
||||
locations[l.ID] = l
|
||||
if m := l.Mapping; m != nil {
|
||||
if m.ID == 0 || mappings[m.ID] != m {
|
||||
return fmt.Errorf("inconsistent mapping %p: %d", m, m.ID)
|
||||
}
|
||||
}
|
||||
for _, ln := range l.Line {
|
||||
f := ln.Function
|
||||
if f == nil {
|
||||
return fmt.Errorf("location id: %d has a line with nil function", l.ID)
|
||||
}
|
||||
if f.ID == 0 || functions[f.ID] != f {
|
||||
return fmt.Errorf("inconsistent function %p: %d", f, f.ID)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Aggregate merges the locations in the profile into equivalence
|
||||
// classes preserving the request attributes. It also updates the
|
||||
// samples to point to the merged locations.
|
||||
func (p *Profile) Aggregate(inlineFrame, function, filename, linenumber, address bool) error {
|
||||
for _, m := range p.Mapping {
|
||||
m.HasInlineFrames = m.HasInlineFrames && inlineFrame
|
||||
m.HasFunctions = m.HasFunctions && function
|
||||
m.HasFilenames = m.HasFilenames && filename
|
||||
m.HasLineNumbers = m.HasLineNumbers && linenumber
|
||||
}
|
||||
|
||||
// Aggregate functions
|
||||
if !function || !filename {
|
||||
for _, f := range p.Function {
|
||||
if !function {
|
||||
f.Name = ""
|
||||
f.SystemName = ""
|
||||
}
|
||||
if !filename {
|
||||
f.Filename = ""
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Aggregate locations
|
||||
if !inlineFrame || !address || !linenumber {
|
||||
for _, l := range p.Location {
|
||||
if !inlineFrame && len(l.Line) > 1 {
|
||||
l.Line = l.Line[len(l.Line)-1:]
|
||||
}
|
||||
if !linenumber {
|
||||
for i := range l.Line {
|
||||
l.Line[i].Line = 0
|
||||
}
|
||||
}
|
||||
if !address {
|
||||
l.Address = 0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return p.CheckValid()
|
||||
}
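// A sketch, assuming a parsed *Profile in p: collapse locations to function
// granularity by dropping inline frames, line numbers and addresses while
// keeping function and file names.
//
//	// Aggregate(inlineFrame, function, filename, linenumber, address)
//	if err := p.Aggregate(false, true, true, false, false); err != nil {
//		// the aggregated profile failed CheckValid
//	}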
|
||||
|
||||
// NumLabelUnits returns a map of numeric label keys to the units
|
||||
// associated with those keys and a map of those keys to any units
|
||||
// that were encountered but not used.
|
||||
// Unit for a given key is the first encountered unit for that key. If multiple
|
||||
// units are encountered for values paired with a particular key, then the first
|
||||
// unit encountered is used and all other units are returned in sorted order
|
||||
// in the map of ignored units.
|
||||
// If no units are encountered for a particular key, the unit is then inferred
|
||||
// based on the key.
|
||||
func (p *Profile) NumLabelUnits() (map[string]string, map[string][]string) {
|
||||
numLabelUnits := map[string]string{}
|
||||
ignoredUnits := map[string]map[string]bool{}
|
||||
encounteredKeys := map[string]bool{}
|
||||
|
||||
// Determine units based on numeric tags for each sample.
|
||||
for _, s := range p.Sample {
|
||||
for k := range s.NumLabel {
|
||||
encounteredKeys[k] = true
|
||||
for _, unit := range s.NumUnit[k] {
|
||||
if unit == "" {
|
||||
continue
|
||||
}
|
||||
if wantUnit, ok := numLabelUnits[k]; !ok {
|
||||
numLabelUnits[k] = unit
|
||||
} else if wantUnit != unit {
|
||||
if v, ok := ignoredUnits[k]; ok {
|
||||
v[unit] = true
|
||||
} else {
|
||||
ignoredUnits[k] = map[string]bool{unit: true}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Infer units for keys without any units associated with
|
||||
// numeric tag values.
|
||||
for key := range encounteredKeys {
|
||||
unit := numLabelUnits[key]
|
||||
if unit == "" {
|
||||
switch key {
|
||||
case "alignment", "request":
|
||||
numLabelUnits[key] = "bytes"
|
||||
default:
|
||||
numLabelUnits[key] = key
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Copy ignored units into more readable format
|
||||
unitsIgnored := make(map[string][]string, len(ignoredUnits))
|
||||
for key, values := range ignoredUnits {
|
||||
units := make([]string, len(values))
|
||||
i := 0
|
||||
for unit := range values {
|
||||
units[i] = unit
|
||||
i++
|
||||
}
|
||||
sort.Strings(units)
|
||||
unitsIgnored[key] = units
|
||||
}
|
||||
|
||||
return numLabelUnits, unitsIgnored
|
||||
}
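// A sketch, assuming a parsed *Profile in p whose samples carry a numeric
// "bytes" label:
//
//	units, ignored := p.NumLabelUnits()
//	// units["bytes"] is the first unit seen for that key (or "bytes" by
//	// inference if none was recorded); any conflicting units show up,
//	// sorted, in ignored["bytes"].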
|
||||
|
||||
// String dumps a text representation of a profile. Intended mainly
|
||||
// for debugging purposes.
|
||||
func (p *Profile) String() string {
|
||||
ss := make([]string, 0, len(p.Comments)+len(p.Sample)+len(p.Mapping)+len(p.Location))
|
||||
for _, c := range p.Comments {
|
||||
ss = append(ss, "Comment: "+c)
|
||||
}
|
||||
if pt := p.PeriodType; pt != nil {
|
||||
ss = append(ss, fmt.Sprintf("PeriodType: %s %s", pt.Type, pt.Unit))
|
||||
}
|
||||
ss = append(ss, fmt.Sprintf("Period: %d", p.Period))
|
||||
if p.TimeNanos != 0 {
|
||||
ss = append(ss, fmt.Sprintf("Time: %v", time.Unix(0, p.TimeNanos)))
|
||||
}
|
||||
if p.DurationNanos != 0 {
|
||||
ss = append(ss, fmt.Sprintf("Duration: %.4v", time.Duration(p.DurationNanos)))
|
||||
}
|
||||
|
||||
ss = append(ss, "Samples:")
|
||||
var sh1 string
|
||||
for _, s := range p.SampleType {
|
||||
dflt := ""
|
||||
if s.Type == p.DefaultSampleType {
|
||||
dflt = "[dflt]"
|
||||
}
|
||||
sh1 = sh1 + fmt.Sprintf("%s/%s%s ", s.Type, s.Unit, dflt)
|
||||
}
|
||||
ss = append(ss, strings.TrimSpace(sh1))
|
||||
for _, s := range p.Sample {
|
||||
ss = append(ss, s.string())
|
||||
}
|
||||
|
||||
ss = append(ss, "Locations")
|
||||
for _, l := range p.Location {
|
||||
ss = append(ss, l.string())
|
||||
}
|
||||
|
||||
ss = append(ss, "Mappings")
|
||||
for _, m := range p.Mapping {
|
||||
ss = append(ss, m.string())
|
||||
}
|
||||
|
||||
return strings.Join(ss, "\n") + "\n"
|
||||
}
|
||||
|
||||
// string dumps a text representation of a mapping. Intended mainly
|
||||
// for debugging purposes.
|
||||
func (m *Mapping) string() string {
|
||||
bits := ""
|
||||
if m.HasFunctions {
|
||||
bits = bits + "[FN]"
|
||||
}
|
||||
if m.HasFilenames {
|
||||
bits = bits + "[FL]"
|
||||
}
|
||||
if m.HasLineNumbers {
|
||||
bits = bits + "[LN]"
|
||||
}
|
||||
if m.HasInlineFrames {
|
||||
bits = bits + "[IN]"
|
||||
}
|
||||
return fmt.Sprintf("%d: %#x/%#x/%#x %s %s %s",
|
||||
m.ID,
|
||||
m.Start, m.Limit, m.Offset,
|
||||
m.File,
|
||||
m.BuildID,
|
||||
bits)
|
||||
}
|
||||
|
||||
// string dumps a text representation of a location. Intended mainly
|
||||
// for debugging purposes.
|
||||
func (l *Location) string() string {
|
||||
ss := []string{}
|
||||
locStr := fmt.Sprintf("%6d: %#x ", l.ID, l.Address)
|
||||
if m := l.Mapping; m != nil {
|
||||
locStr = locStr + fmt.Sprintf("M=%d ", m.ID)
|
||||
}
|
||||
if l.IsFolded {
|
||||
locStr = locStr + "[F] "
|
||||
}
|
||||
if len(l.Line) == 0 {
|
||||
ss = append(ss, locStr)
|
||||
}
|
||||
for li := range l.Line {
|
||||
lnStr := "??"
|
||||
if fn := l.Line[li].Function; fn != nil {
|
||||
lnStr = fmt.Sprintf("%s %s:%d s=%d",
|
||||
fn.Name,
|
||||
fn.Filename,
|
||||
l.Line[li].Line,
|
||||
fn.StartLine)
|
||||
if fn.Name != fn.SystemName {
|
||||
lnStr = lnStr + "(" + fn.SystemName + ")"
|
||||
}
|
||||
}
|
||||
ss = append(ss, locStr+lnStr)
|
||||
// Do not print location details past the first line
|
||||
locStr = " "
|
||||
}
|
||||
return strings.Join(ss, "\n")
|
||||
}
|
||||
|
||||
// string dumps a text representation of a sample. Intended mainly
|
||||
// for debugging purposes.
|
||||
func (s *Sample) string() string {
|
||||
ss := []string{}
|
||||
var sv string
|
||||
for _, v := range s.Value {
|
||||
sv = fmt.Sprintf("%s %10d", sv, v)
|
||||
}
|
||||
sv = sv + ": "
|
||||
for _, l := range s.Location {
|
||||
sv = sv + fmt.Sprintf("%d ", l.ID)
|
||||
}
|
||||
ss = append(ss, sv)
|
||||
const labelHeader = " "
|
||||
if len(s.Label) > 0 {
|
||||
ss = append(ss, labelHeader+labelsToString(s.Label))
|
||||
}
|
||||
if len(s.NumLabel) > 0 {
|
||||
ss = append(ss, labelHeader+numLabelsToString(s.NumLabel, s.NumUnit))
|
||||
}
|
||||
return strings.Join(ss, "\n")
|
||||
}
|
||||
|
||||
// labelsToString returns a string representation of a
|
||||
// map representing labels.
|
||||
func labelsToString(labels map[string][]string) string {
|
||||
ls := []string{}
|
||||
for k, v := range labels {
|
||||
ls = append(ls, fmt.Sprintf("%s:%v", k, v))
|
||||
}
|
||||
sort.Strings(ls)
|
||||
return strings.Join(ls, " ")
|
||||
}
|
||||
|
||||
// numLabelsToString returns a string representation of a map
|
||||
// representing numeric labels.
|
||||
func numLabelsToString(numLabels map[string][]int64, numUnits map[string][]string) string {
|
||||
ls := []string{}
|
||||
for k, v := range numLabels {
|
||||
units := numUnits[k]
|
||||
var labelString string
|
||||
if len(units) == len(v) {
|
||||
values := make([]string, len(v))
|
||||
for i, vv := range v {
|
||||
values[i] = fmt.Sprintf("%d %s", vv, units[i])
|
||||
}
|
||||
labelString = fmt.Sprintf("%s:%v", k, values)
|
||||
} else {
|
||||
labelString = fmt.Sprintf("%s:%v", k, v)
|
||||
}
|
||||
ls = append(ls, labelString)
|
||||
}
|
||||
sort.Strings(ls)
|
||||
return strings.Join(ls, " ")
|
||||
}
|
||||
|
||||
// SetLabel sets the specified key to the specified value for all samples in the
|
||||
// profile.
|
||||
func (p *Profile) SetLabel(key string, value []string) {
|
||||
for _, sample := range p.Sample {
|
||||
if sample.Label == nil {
|
||||
sample.Label = map[string][]string{key: value}
|
||||
} else {
|
||||
sample.Label[key] = value
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveLabel removes all labels associated with the specified key for all
|
||||
// samples in the profile.
|
||||
func (p *Profile) RemoveLabel(key string) {
|
||||
for _, sample := range p.Sample {
|
||||
delete(sample.Label, key)
|
||||
}
|
||||
}
|
||||
|
||||
// HasLabel returns true if a sample has a label with indicated key and value.
|
||||
func (s *Sample) HasLabel(key, value string) bool {
|
||||
for _, v := range s.Label[key] {
|
||||
if v == value {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// DiffBaseSample returns true if a sample belongs to the diff base and false
|
||||
// otherwise.
|
||||
func (s *Sample) DiffBaseSample() bool {
|
||||
return s.HasLabel("pprof::base", "true")
|
||||
}
|
||||
|
||||
// Scale multiplies all sample values in a profile by a constant and keeps
|
||||
// only samples that have at least one non-zero value.
|
||||
func (p *Profile) Scale(ratio float64) {
|
||||
if ratio == 1 {
|
||||
return
|
||||
}
|
||||
ratios := make([]float64, len(p.SampleType))
|
||||
for i := range p.SampleType {
|
||||
ratios[i] = ratio
|
||||
}
|
||||
p.ScaleN(ratios)
|
||||
}
|
||||
|
||||
// ScaleN multiplies each value in every sample by the corresponding ratio
// and keeps only samples that have at least one non-zero value.
|
||||
func (p *Profile) ScaleN(ratios []float64) error {
|
||||
if len(p.SampleType) != len(ratios) {
|
||||
return fmt.Errorf("mismatched scale ratios, got %d, want %d", len(ratios), len(p.SampleType))
|
||||
}
|
||||
allOnes := true
|
||||
for _, r := range ratios {
|
||||
if r != 1 {
|
||||
allOnes = false
|
||||
break
|
||||
}
|
||||
}
|
||||
if allOnes {
|
||||
return nil
|
||||
}
|
||||
fillIdx := 0
|
||||
for _, s := range p.Sample {
|
||||
keepSample := false
|
||||
for i, v := range s.Value {
|
||||
if ratios[i] != 1 {
|
||||
val := int64(math.Round(float64(v) * ratios[i]))
|
||||
s.Value[i] = val
|
||||
keepSample = keepSample || val != 0
|
||||
}
|
||||
}
|
||||
if keepSample {
|
||||
p.Sample[fillIdx] = s
|
||||
fillIdx++
|
||||
}
|
||||
}
|
||||
p.Sample = p.Sample[:fillIdx]
|
||||
return nil
|
||||
}
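// A sketch, assuming a parsed *Profile in p with exactly two sample types:
//
//	p.Scale(0.5)                    // halve every value; all-zero samples are dropped
//	_ = p.ScaleN([]float64{1, 0.5}) // or scale each sample type independently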
|
||||
|
||||
// HasFunctions determines if all locations in this profile have
|
||||
// symbolized function information.
|
||||
func (p *Profile) HasFunctions() bool {
|
||||
for _, l := range p.Location {
|
||||
if l.Mapping != nil && !l.Mapping.HasFunctions {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// HasFileLines determines if all locations in this profile have
|
||||
// symbolized file and line number information.
|
||||
func (p *Profile) HasFileLines() bool {
|
||||
for _, l := range p.Location {
|
||||
if l.Mapping != nil && (!l.Mapping.HasFilenames || !l.Mapping.HasLineNumbers) {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
// Unsymbolizable returns true if a mapping points to a binary for which
|
||||
// locations can't be symbolized in principle, at least now. Examples are
|
||||
// "[vdso]", [vsyscall]" and some others, see the code.
|
||||
func (m *Mapping) Unsymbolizable() bool {
|
||||
name := filepath.Base(m.File)
|
||||
return strings.HasPrefix(name, "[") || strings.HasPrefix(name, "linux-vdso") || strings.HasPrefix(m.File, "/dev/dri/")
|
||||
}
|
||||
|
||||
// Copy makes a fully independent copy of a profile.
|
||||
func (p *Profile) Copy() *Profile {
|
||||
pp := &Profile{}
|
||||
if err := unmarshal(serialize(p), pp); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
if err := pp.postDecode(); err != nil {
|
||||
panic(err)
|
||||
}
|
||||
|
||||
return pp
|
||||
}
|
|
@ -0,0 +1,370 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This file is a simple protocol buffer encoder and decoder.
|
||||
// The format is described at
|
||||
// https://developers.google.com/protocol-buffers/docs/encoding
|
||||
//
|
||||
// A protocol message must implement the message interface:
|
||||
// decoder() []decoder
|
||||
// encode(*buffer)
|
||||
//
|
||||
// The decoder method returns a slice indexed by field number that gives the
|
||||
// function to decode that field.
|
||||
// The encode method encodes its receiver into the given buffer.
|
||||
//
|
||||
// The two methods are simple enough to be implemented by hand rather than
|
||||
// by using a protocol compiler.
|
||||
//
|
||||
// See profile.go for examples of messages implementing this interface.
|
||||
//
|
||||
// There is no support for groups, message sets, or "has" bits.
|
||||
|
||||
package profile
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
type buffer struct {
|
||||
field int // field tag
|
||||
typ int // proto wire type code for field
|
||||
u64 uint64
|
||||
data []byte
|
||||
tmp [16]byte
|
||||
}
|
||||
|
||||
type decoder func(*buffer, message) error
|
||||
|
||||
type message interface {
|
||||
decoder() []decoder
|
||||
encode(*buffer)
|
||||
}
|
||||
|
||||
func marshal(m message) []byte {
|
||||
var b buffer
|
||||
m.encode(&b)
|
||||
return b.data
|
||||
}
|
||||
|
||||
func encodeVarint(b *buffer, x uint64) {
|
||||
for x >= 128 {
|
||||
b.data = append(b.data, byte(x)|0x80)
|
||||
x >>= 7
|
||||
}
|
||||
b.data = append(b.data, byte(x))
|
||||
}
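// varintSketch is an illustrative sketch (not upstream code): the layout
// written by encodeVarint is the standard protocol buffer base-128 varint, so
// the encoding/binary helpers round-trip the same bytes. Assumes extra imports
// of "encoding/binary" and "fmt".
func varintSketch() {
	buf := make([]byte, binary.MaxVarintLen64)
	n := binary.PutUvarint(buf, 300)
	fmt.Printf("% x\n", buf[:n]) // ac 02: low 7 bits first, high bit marks continuation

	v, _ := binary.Uvarint(buf[:n])
	fmt.Println(v) // 300
}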
|
||||
|
||||
func encodeLength(b *buffer, tag int, len int) {
|
||||
encodeVarint(b, uint64(tag)<<3|2)
|
||||
encodeVarint(b, uint64(len))
|
||||
}
|
||||
|
||||
func encodeUint64(b *buffer, tag int, x uint64) {
|
||||
// append varint to b.data
|
||||
encodeVarint(b, uint64(tag)<<3)
|
||||
encodeVarint(b, x)
|
||||
}
|
||||
|
||||
func encodeUint64s(b *buffer, tag int, x []uint64) {
|
||||
if len(x) > 2 {
|
||||
// Use packed encoding
|
||||
n1 := len(b.data)
|
||||
for _, u := range x {
|
||||
encodeVarint(b, u)
|
||||
}
|
||||
n2 := len(b.data)
|
||||
encodeLength(b, tag, n2-n1)
|
||||
n3 := len(b.data)
|
||||
copy(b.tmp[:], b.data[n2:n3])
|
||||
copy(b.data[n1+(n3-n2):], b.data[n1:n2])
|
||||
copy(b.data[n1:], b.tmp[:n3-n2])
|
||||
return
|
||||
}
|
||||
for _, u := range x {
|
||||
encodeUint64(b, tag, u)
|
||||
}
|
||||
}
|
||||
|
||||
func encodeUint64Opt(b *buffer, tag int, x uint64) {
|
||||
if x == 0 {
|
||||
return
|
||||
}
|
||||
encodeUint64(b, tag, x)
|
||||
}
|
||||
|
||||
func encodeInt64(b *buffer, tag int, x int64) {
|
||||
u := uint64(x)
|
||||
encodeUint64(b, tag, u)
|
||||
}
|
||||
|
||||
func encodeInt64s(b *buffer, tag int, x []int64) {
|
||||
if len(x) > 2 {
|
||||
// Use packed encoding
|
||||
n1 := len(b.data)
|
||||
for _, u := range x {
|
||||
encodeVarint(b, uint64(u))
|
||||
}
|
||||
n2 := len(b.data)
|
||||
encodeLength(b, tag, n2-n1)
|
||||
n3 := len(b.data)
|
||||
copy(b.tmp[:], b.data[n2:n3])
|
||||
copy(b.data[n1+(n3-n2):], b.data[n1:n2])
|
||||
copy(b.data[n1:], b.tmp[:n3-n2])
|
||||
return
|
||||
}
|
||||
for _, u := range x {
|
||||
encodeInt64(b, tag, u)
|
||||
}
|
||||
}
|
||||
|
||||
func encodeInt64Opt(b *buffer, tag int, x int64) {
|
||||
if x == 0 {
|
||||
return
|
||||
}
|
||||
encodeInt64(b, tag, x)
|
||||
}
|
||||
|
||||
func encodeString(b *buffer, tag int, x string) {
|
||||
encodeLength(b, tag, len(x))
|
||||
b.data = append(b.data, x...)
|
||||
}
|
||||
|
||||
func encodeStrings(b *buffer, tag int, x []string) {
|
||||
for _, s := range x {
|
||||
encodeString(b, tag, s)
|
||||
}
|
||||
}
|
||||
|
||||
func encodeBool(b *buffer, tag int, x bool) {
|
||||
if x {
|
||||
encodeUint64(b, tag, 1)
|
||||
} else {
|
||||
encodeUint64(b, tag, 0)
|
||||
}
|
||||
}
|
||||
|
||||
func encodeBoolOpt(b *buffer, tag int, x bool) {
|
||||
if x {
|
||||
encodeBool(b, tag, x)
|
||||
}
|
||||
}
|
||||
|
||||
func encodeMessage(b *buffer, tag int, m message) {
|
||||
n1 := len(b.data)
|
||||
m.encode(b)
|
||||
n2 := len(b.data)
|
||||
encodeLength(b, tag, n2-n1)
|
||||
n3 := len(b.data)
|
||||
copy(b.tmp[:], b.data[n2:n3])
|
||||
copy(b.data[n1+(n3-n2):], b.data[n1:n2])
|
||||
copy(b.data[n1:], b.tmp[:n3-n2])
|
||||
}
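// rotateSketch is an illustrative sketch (bytes invented, not upstream code).
// encodeMessage and the packed encoders above append the payload first, then
// the tag and length, and finally rotate the prefix to the front with three
// copy calls; this shows the same rotation. Assumes an extra import of "fmt".
func rotateSketch() {
	data := []byte{0x0a, 0x0b, 0x0c, 0x1a, 0x03} // payload, then tag (field 3, wire type 2) and length
	var tmp [16]byte
	n1, n2, n3 := 0, 3, len(data)
	copy(tmp[:], data[n2:n3])            // stash the prefix
	copy(data[n1+(n3-n2):], data[n1:n2]) // shift the payload right
	copy(data[n1:], tmp[:n3-n2])         // drop the prefix in front
	fmt.Printf("% x\n", data)            // 1a 03 0a 0b 0c
}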
|
||||
|
||||
func unmarshal(data []byte, m message) (err error) {
|
||||
b := buffer{data: data, typ: 2}
|
||||
return decodeMessage(&b, m)
|
||||
}
|
||||
|
||||
func le64(p []byte) uint64 {
|
||||
return uint64(p[0]) | uint64(p[1])<<8 | uint64(p[2])<<16 | uint64(p[3])<<24 | uint64(p[4])<<32 | uint64(p[5])<<40 | uint64(p[6])<<48 | uint64(p[7])<<56
|
||||
}
|
||||
|
||||
func le32(p []byte) uint32 {
|
||||
return uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
|
||||
}
|
||||
|
||||
func decodeVarint(data []byte) (uint64, []byte, error) {
|
||||
var u uint64
|
||||
for i := 0; ; i++ {
|
||||
if i >= 10 || i >= len(data) {
|
||||
return 0, nil, errors.New("bad varint")
|
||||
}
|
||||
u |= uint64(data[i]&0x7F) << uint(7*i)
|
||||
if data[i]&0x80 == 0 {
|
||||
return u, data[i+1:], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func decodeField(b *buffer, data []byte) ([]byte, error) {
|
||||
x, data, err := decodeVarint(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b.field = int(x >> 3)
|
||||
b.typ = int(x & 7)
|
||||
b.data = nil
|
||||
b.u64 = 0
|
||||
switch b.typ {
|
||||
case 0:
|
||||
b.u64, data, err = decodeVarint(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
case 1:
|
||||
if len(data) < 8 {
|
||||
return nil, errors.New("not enough data")
|
||||
}
|
||||
b.u64 = le64(data[:8])
|
||||
data = data[8:]
|
||||
case 2:
|
||||
var n uint64
|
||||
n, data, err = decodeVarint(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if n > uint64(len(data)) {
|
||||
return nil, errors.New("too much data")
|
||||
}
|
||||
b.data = data[:n]
|
||||
data = data[n:]
|
||||
case 5:
|
||||
if len(data) < 4 {
|
||||
return nil, errors.New("not enough data")
|
||||
}
|
||||
b.u64 = uint64(le32(data[:4]))
|
||||
data = data[4:]
|
||||
default:
|
||||
return nil, fmt.Errorf("unknown wire type: %d", b.typ)
|
||||
}
|
||||
|
||||
return data, nil
|
||||
}
|
||||
|
||||
func checkType(b *buffer, typ int) error {
|
||||
if b.typ != typ {
|
||||
return errors.New("type mismatch")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeMessage(b *buffer, m message) error {
|
||||
if err := checkType(b, 2); err != nil {
|
||||
return err
|
||||
}
|
||||
dec := m.decoder()
|
||||
data := b.data
|
||||
for len(data) > 0 {
|
||||
// pull varint field# + type
|
||||
var err error
|
||||
data, err = decodeField(b, data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if b.field >= len(dec) || dec[b.field] == nil {
|
||||
continue
|
||||
}
|
||||
if err := dec[b.field](b, m); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeInt64(b *buffer, x *int64) error {
|
||||
if err := checkType(b, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = int64(b.u64)
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeInt64s(b *buffer, x *[]int64) error {
|
||||
if b.typ == 2 {
|
||||
// Packed encoding
|
||||
data := b.data
|
||||
tmp := make([]int64, 0, len(data)) // Maximally sized
|
||||
for len(data) > 0 {
|
||||
var u uint64
|
||||
var err error
|
||||
|
||||
if u, data, err = decodeVarint(data); err != nil {
|
||||
return err
|
||||
}
|
||||
tmp = append(tmp, int64(u))
|
||||
}
|
||||
*x = append(*x, tmp...)
|
||||
return nil
|
||||
}
|
||||
var i int64
|
||||
if err := decodeInt64(b, &i); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = append(*x, i)
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeUint64(b *buffer, x *uint64) error {
|
||||
if err := checkType(b, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = b.u64
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeUint64s(b *buffer, x *[]uint64) error {
|
||||
if b.typ == 2 {
|
||||
data := b.data
|
||||
// Packed encoding
|
||||
tmp := make([]uint64, 0, len(data)) // Maximally sized
|
||||
for len(data) > 0 {
|
||||
var u uint64
|
||||
var err error
|
||||
|
||||
if u, data, err = decodeVarint(data); err != nil {
|
||||
return err
|
||||
}
|
||||
tmp = append(tmp, u)
|
||||
}
|
||||
*x = append(*x, tmp...)
|
||||
return nil
|
||||
}
|
||||
var u uint64
|
||||
if err := decodeUint64(b, &u); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = append(*x, u)
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeString(b *buffer, x *string) error {
|
||||
if err := checkType(b, 2); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = string(b.data)
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeStrings(b *buffer, x *[]string) error {
|
||||
var s string
|
||||
if err := decodeString(b, &s); err != nil {
|
||||
return err
|
||||
}
|
||||
*x = append(*x, s)
|
||||
return nil
|
||||
}
|
||||
|
||||
func decodeBool(b *buffer, x *bool) error {
|
||||
if err := checkType(b, 0); err != nil {
|
||||
return err
|
||||
}
|
||||
if int64(b.u64) == 0 {
|
||||
*x = false
|
||||
} else {
|
||||
*x = true
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -0,0 +1,178 @@
|
|||
// Copyright 2014 Google Inc. All Rights Reserved.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Implements methods to remove frames from profiles.
|
||||
|
||||
package profile
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
reservedNames = []string{"(anonymous namespace)", "operator()"}
|
||||
bracketRx = func() *regexp.Regexp {
|
||||
var quotedNames []string
|
||||
for _, name := range append(reservedNames, "(") {
|
||||
quotedNames = append(quotedNames, regexp.QuoteMeta(name))
|
||||
}
|
||||
return regexp.MustCompile(strings.Join(quotedNames, "|"))
|
||||
}()
|
||||
)
|
||||
|
||||
// simplifyFunc does some primitive simplification of function names.
|
||||
func simplifyFunc(f string) string {
|
||||
// Account for leading '.' on the PPC ELF v1 ABI.
|
||||
funcName := strings.TrimPrefix(f, ".")
|
||||
// Account for unsimplified names -- try to remove the argument list by trimming
|
||||
// starting from the first '(', but skipping reserved names that have '('.
|
||||
for _, ind := range bracketRx.FindAllStringSubmatchIndex(funcName, -1) {
|
||||
foundReserved := false
|
||||
for _, res := range reservedNames {
|
||||
if funcName[ind[0]:ind[1]] == res {
|
||||
foundReserved = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !foundReserved {
|
||||
funcName = funcName[:ind[0]]
|
||||
break
|
||||
}
|
||||
}
|
||||
return funcName
|
||||
}
|
||||
|
||||
// Prune removes all nodes beneath a node matching dropRx, and not
|
||||
// matching keepRx. If the root node of a Sample matches, the sample
|
||||
// will have an empty stack.
|
||||
func (p *Profile) Prune(dropRx, keepRx *regexp.Regexp) {
|
||||
prune := make(map[uint64]bool)
|
||||
pruneBeneath := make(map[uint64]bool)
|
||||
|
||||
for _, loc := range p.Location {
|
||||
var i int
|
||||
for i = len(loc.Line) - 1; i >= 0; i-- {
|
||||
if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
|
||||
funcName := simplifyFunc(fn.Name)
|
||||
if dropRx.MatchString(funcName) {
|
||||
if keepRx == nil || !keepRx.MatchString(funcName) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if i >= 0 {
|
||||
// Found matching entry to prune.
|
||||
pruneBeneath[loc.ID] = true
|
||||
|
||||
// Remove the matching location.
|
||||
if i == len(loc.Line)-1 {
|
||||
// Matched the top entry: prune the whole location.
|
||||
prune[loc.ID] = true
|
||||
} else {
|
||||
loc.Line = loc.Line[i+1:]
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Prune locs from each Sample
|
||||
for _, sample := range p.Sample {
|
||||
// Scan from the root to the leaves to find the prune location.
|
||||
// Do not prune frames before the first user frame, to avoid
|
||||
// pruning everything.
|
||||
foundUser := false
|
||||
for i := len(sample.Location) - 1; i >= 0; i-- {
|
||||
id := sample.Location[i].ID
|
||||
if !prune[id] && !pruneBeneath[id] {
|
||||
foundUser = true
|
||||
continue
|
||||
}
|
||||
if !foundUser {
|
||||
continue
|
||||
}
|
||||
if prune[id] {
|
||||
sample.Location = sample.Location[i+1:]
|
||||
break
|
||||
}
|
||||
if pruneBeneath[id] {
|
||||
sample.Location = sample.Location[i:]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// RemoveUninteresting prunes and elides profiles using built-in
|
||||
// tables of uninteresting function names.
|
||||
func (p *Profile) RemoveUninteresting() error {
|
||||
var keep, drop *regexp.Regexp
|
||||
var err error
|
||||
|
||||
if p.DropFrames != "" {
|
||||
if drop, err = regexp.Compile("^(" + p.DropFrames + ")$"); err != nil {
|
||||
return fmt.Errorf("failed to compile regexp %s: %v", p.DropFrames, err)
|
||||
}
|
||||
if p.KeepFrames != "" {
|
||||
if keep, err = regexp.Compile("^(" + p.KeepFrames + ")$"); err != nil {
|
||||
return fmt.Errorf("failed to compile regexp %s: %v", p.KeepFrames, err)
|
||||
}
|
||||
}
|
||||
p.Prune(drop, keep)
|
||||
}
|
||||
return nil
|
||||
}
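// pruneSketch is an illustrative sketch (patterns invented, not upstream
// code): DropFrames and KeepFrames are anchored into ^(...)$ before matching,
// so each pattern must cover the whole simplified function name. Assumes an
// extra import of "log".
func pruneSketch(p *Profile) {
	p.DropFrames = `runtime\..*`       // drop every runtime.* frame ...
	p.KeepFrames = `runtime\.mallocgc` // ... except runtime.mallocgc
	if err := p.RemoveUninteresting(); err != nil {
		log.Fatal(err)
	}
}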
|
||||
|
||||
// PruneFrom removes all nodes beneath the lowest node matching dropRx, not including itself.
|
||||
//
|
||||
// Please see the example below to understand this method as well as
|
||||
// the difference from Prune method.
|
||||
//
|
||||
// A sample contains a Location stack of [A,B,C,B,D], where D is the top frame and there is no inlining.
|
||||
//
|
||||
// PruneFrom(A) returns [A,B,C,B,D] because there's no node beneath A.
|
||||
// Prune(A, nil) returns [B,C,B,D] by removing A itself.
|
||||
//
|
||||
// PruneFrom(B) returns [B,C,B,D] by removing all nodes beneath the first B when scanning from the bottom.
|
||||
// Prune(B, nil) returns [D] because a matching node is found by scanning from the root.
|
||||
func (p *Profile) PruneFrom(dropRx *regexp.Regexp) {
|
||||
pruneBeneath := make(map[uint64]bool)
|
||||
|
||||
for _, loc := range p.Location {
|
||||
for i := 0; i < len(loc.Line); i++ {
|
||||
if fn := loc.Line[i].Function; fn != nil && fn.Name != "" {
|
||||
funcName := simplifyFunc(fn.Name)
|
||||
if dropRx.MatchString(funcName) {
|
||||
// Found matching entry to prune.
|
||||
pruneBeneath[loc.ID] = true
|
||||
loc.Line = loc.Line[i:]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Prune locs from each Sample
|
||||
for _, sample := range p.Sample {
|
||||
// Scan from the bottom leaf to the root to find the prune location.
|
||||
for i, loc := range sample.Location {
|
||||
if pruneBeneath[loc.ID] {
|
||||
sample.Location = sample.Location[i:]
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,61 +0,0 @@
|
|||
# A QUIC implementation in pure Go
|
||||
|
||||
<img src="docs/quic.png" width=303 height=124>
|
||||
|
||||
[![PkgGoDev](https://pkg.go.dev/badge/github.com/lucas-clemente/quic-go)](https://pkg.go.dev/github.com/lucas-clemente/quic-go)
|
||||
[![Code Coverage](https://img.shields.io/codecov/c/github/lucas-clemente/quic-go/master.svg?style=flat-square)](https://codecov.io/gh/lucas-clemente/quic-go/)
|
||||
|
||||
quic-go is an implementation of the QUIC protocol ([RFC 9000](https://datatracker.ietf.org/doc/html/rfc9000), [RFC 9001](https://datatracker.ietf.org/doc/html/rfc9001), [RFC 9002](https://datatracker.ietf.org/doc/html/rfc9002)) in Go, including the [Unreliable Datagram Extension, RFC 9221](https://datatracker.ietf.org/doc/html/rfc9221). It has support for HTTP/3 [RFC 9114](https://datatracker.ietf.org/doc/html/rfc9114).
|
||||
|
||||
In addition to the RFCs listed above, it currently implements the [IETF QUIC draft-29](https://tools.ietf.org/html/draft-ietf-quic-transport-29). Support for draft-29 will eventually be dropped, as it is phased out of the ecosystem.
|
||||
|
||||
## Guides
|
||||
|
||||
*We currently support Go 1.16.x, Go 1.17.x, and Go 1.18.x.*
|
||||
|
||||
Running tests:
|
||||
|
||||
go test ./...
|
||||
|
||||
### QUIC without HTTP/3
|
||||
|
||||
Take a look at [this echo example](example/echo/echo.go).
|
||||
|
||||
## Usage
|
||||
|
||||
### As a server
|
||||
|
||||
See the [example server](example/main.go). Starting a QUIC server is very similar to using the standard library net/http in Go:
|
||||
|
||||
```go
|
||||
http.Handle("/", http.FileServer(http.Dir(wwwDir)))
|
||||
http3.ListenAndServeQUIC("localhost:4242", "/path/to/cert/chain.pem", "/path/to/privkey.pem", nil)
|
||||
```
|
||||
|
||||
### As a client
|
||||
|
||||
See the [example client](example/client/main.go). Use an `http3.RoundTripper` as the `Transport` in an `http.Client`.
|
||||
|
||||
```go
|
||||
http.Client{
|
||||
Transport: &http3.RoundTripper{},
|
||||
}
|
||||
```
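
A slightly fuller client sketch (the URL is invented), showing the `RoundTripper` driving an ordinary request:

```go
client := &http.Client{
	Transport: &http3.RoundTripper{},
}
resp, err := client.Get("https://quic.example.com/") // hypothetical HTTP/3 endpoint
if err != nil {
	log.Fatal(err)
}
defer resp.Body.Close()
body, _ := io.ReadAll(resp.Body)
fmt.Println(resp.Proto, len(body))
```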
|
||||
|
||||
## Projects using quic-go
|
||||
|
||||
| Project | Description | Stars |
|
||||
|------------------------------------------------------|--------------------------------------------------------------------------------------------------------|-------|
|
||||
| [algernon](https://github.com/xyproto/algernon) | Small self-contained pure-Go web server with Lua, Markdown, HTTP/2, QUIC, Redis and PostgreSQL support | ![GitHub Repo stars](https://img.shields.io/github/stars/xyproto/algernon?style=flat-square) |
|
||||
| [caddy](https://github.com/caddyserver/caddy/) | Fast, multi-platform web server with automatic HTTPS | ![GitHub Repo stars](https://img.shields.io/github/stars/caddyserver/caddy?style=flat-square) |
|
||||
| [go-ipfs](https://github.com/ipfs/go-ipfs) | IPFS implementation in go | ![GitHub Repo stars](https://img.shields.io/github/stars/ipfs/go-ipfs?style=flat-square) |
|
||||
| [syncthing](https://github.com/syncthing/syncthing/) | Open Source Continuous File Synchronization | ![GitHub Repo stars](https://img.shields.io/github/stars/syncthing/syncthing?style=flat-square) |
|
||||
| [traefik](https://github.com/traefik/traefik) | The Cloud Native Application Proxy | ![GitHub Repo stars](https://img.shields.io/github/stars/traefik/traefik?style=flat-square) |
|
||||
| [v2ray-core](https://github.com/v2fly/v2ray-core) | A platform for building proxies to bypass network restrictions | ![GitHub Repo stars](https://img.shields.io/github/stars/v2fly/v2ray-core?style=flat-square) |
|
||||
| [cloudflared](https://github.com/cloudflare/cloudflared) | A tunneling daemon that proxies traffic from the Cloudflare network to your origins | ![GitHub Repo stars](https://img.shields.io/github/stars/cloudflare/cloudflared?style=flat-square) |
|
||||
| [OONI Probe](https://github.com/ooni/probe-cli) | The Open Observatory of Network Interference (OONI) aims to empower decentralized efforts in documenting Internet censorship around the world. | ![GitHub Repo stars](https://img.shields.io/github/stars/ooni/probe-cli?style=flat-square) |
|
||||
|
||||
|
||||
## Contributing
|
||||
|
||||
We are always happy to welcome new contributors! We have a number of self-contained issues that are suitable for first-time contributors, they are tagged with [help wanted](https://github.com/lucas-clemente/quic-go/issues?q=is%3Aissue+is%3Aopen+label%3A%22help+wanted%22). If you have any questions, please feel free to reach out by opening an issue or leaving a comment.
|
|
@ -1,338 +0,0 @@
|
|||
package quic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"strings"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
"github.com/lucas-clemente/quic-go/internal/utils"
|
||||
"github.com/lucas-clemente/quic-go/logging"
|
||||
)
|
||||
|
||||
type client struct {
|
||||
sconn sendConn
|
||||
// If the client is created with DialAddr, we create a packet conn.
|
||||
// If it is started with Dial, we take a packet conn as a parameter.
|
||||
createdPacketConn bool
|
||||
|
||||
use0RTT bool
|
||||
|
||||
packetHandlers packetHandlerManager
|
||||
|
||||
tlsConf *tls.Config
|
||||
config *Config
|
||||
|
||||
srcConnID protocol.ConnectionID
|
||||
destConnID protocol.ConnectionID
|
||||
|
||||
initialPacketNumber protocol.PacketNumber
|
||||
hasNegotiatedVersion bool
|
||||
version protocol.VersionNumber
|
||||
|
||||
handshakeChan chan struct{}
|
||||
|
||||
conn quicConn
|
||||
|
||||
tracer logging.ConnectionTracer
|
||||
tracingID uint64
|
||||
logger utils.Logger
|
||||
}
|
||||
|
||||
var (
|
||||
// make it possible to mock connection ID for initial generation in the tests
|
||||
generateConnectionIDForInitial = protocol.GenerateConnectionIDForInitial
|
||||
)
|
||||
|
||||
// DialAddr establishes a new QUIC connection to a server.
|
||||
// It uses a new UDP connection and closes this connection when the QUIC connection is closed.
|
||||
// The hostname for SNI is taken from the given address.
|
||||
// The tls.Config.CipherSuites allows setting of TLS 1.3 cipher suites.
|
||||
func DialAddr(
|
||||
addr string,
|
||||
tlsConf *tls.Config,
|
||||
config *Config,
|
||||
) (Connection, error) {
|
||||
return DialAddrContext(context.Background(), addr, tlsConf, config)
|
||||
}
|
||||
|
||||
// DialAddrEarly establishes a new 0-RTT QUIC connection to a server.
|
||||
// It uses a new UDP connection and closes this connection when the QUIC connection is closed.
|
||||
// The hostname for SNI is taken from the given address.
|
||||
// The tls.Config.CipherSuites allows setting of TLS 1.3 cipher suites.
|
||||
func DialAddrEarly(
|
||||
addr string,
|
||||
tlsConf *tls.Config,
|
||||
config *Config,
|
||||
) (EarlyConnection, error) {
|
||||
return DialAddrEarlyContext(context.Background(), addr, tlsConf, config)
|
||||
}
|
||||
|
||||
// DialAddrEarlyContext establishes a new 0-RTT QUIC connection to a server using provided context.
|
||||
// See DialAddrEarly for details
|
||||
func DialAddrEarlyContext(
|
||||
ctx context.Context,
|
||||
addr string,
|
||||
tlsConf *tls.Config,
|
||||
config *Config,
|
||||
) (EarlyConnection, error) {
|
||||
conn, err := dialAddrContext(ctx, addr, tlsConf, config, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
utils.Logger.WithPrefix(utils.DefaultLogger, "client").Debugf("Returning early connection")
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
// DialAddrContext establishes a new QUIC connection to a server using the provided context.
|
||||
// See DialAddr for details.
|
||||
func DialAddrContext(
|
||||
ctx context.Context,
|
||||
addr string,
|
||||
tlsConf *tls.Config,
|
||||
config *Config,
|
||||
) (Connection, error) {
|
||||
return dialAddrContext(ctx, addr, tlsConf, config, false)
|
||||
}
|
||||
|
||||
func dialAddrContext(
|
||||
ctx context.Context,
|
||||
addr string,
|
||||
tlsConf *tls.Config,
|
||||
config *Config,
|
||||
use0RTT bool,
|
||||
) (quicConn, error) {
|
||||
udpAddr, err := net.ResolveUDPAddr("udp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return dialContext(ctx, udpConn, udpAddr, addr, tlsConf, config, use0RTT, true)
|
||||
}
|
||||
|
||||
// Dial establishes a new QUIC connection to a server using a net.PacketConn. If
|
||||
// the PacketConn satisfies the OOBCapablePacketConn interface (as a net.UDPConn
|
||||
// does), ECN and packet info support will be enabled. In this case, ReadMsgUDP
|
||||
// and WriteMsgUDP will be used instead of ReadFrom and WriteTo to read/write
|
||||
// packets. The same PacketConn can be used for multiple calls to Dial and
|
||||
// Listen, QUIC connection IDs are used for demultiplexing the different
|
||||
// connections. The host parameter is used for SNI. The tls.Config must define
|
||||
// an application protocol (using NextProtos).
|
||||
func Dial(
|
||||
pconn net.PacketConn,
|
||||
remoteAddr net.Addr,
|
||||
host string,
|
||||
tlsConf *tls.Config,
|
||||
config *Config,
|
||||
) (Connection, error) {
|
||||
return dialContext(context.Background(), pconn, remoteAddr, host, tlsConf, config, false, false)
|
||||
}
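// dialSketch is an illustrative sketch (edge address and ALPN value invented,
// not upstream code) of the pre-rename Dial signature above. As the comment
// says, the tls.Config must define an application protocol via NextProtos;
// "crypto/tls" and "net" are already imported by this file.
func dialSketch() (Connection, error) {
	udpConn, err := net.ListenUDP("udp", &net.UDPAddr{IP: net.IPv4zero, Port: 0})
	if err != nil {
		return nil, err
	}
	remote, err := net.ResolveUDPAddr("udp", "edge.example.com:7844") // hypothetical address
	if err != nil {
		return nil, err
	}
	tlsConf := &tls.Config{NextProtos: []string{"example-proto"}}
	return Dial(udpConn, remote, remote.String(), tlsConf, &Config{EnableDatagrams: true})
}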
|
||||
|
||||
// DialEarly establishes a new 0-RTT QUIC connection to a server using a net.PacketConn.
|
||||
// The same PacketConn can be used for multiple calls to Dial and Listen,
|
||||
// QUIC connection IDs are used for demultiplexing the different connections.
|
||||
// The host parameter is used for SNI.
|
||||
// The tls.Config must define an application protocol (using NextProtos).
|
||||
func DialEarly(
|
||||
pconn net.PacketConn,
|
||||
remoteAddr net.Addr,
|
||||
host string,
|
||||
tlsConf *tls.Config,
|
||||
config *Config,
|
||||
) (EarlyConnection, error) {
|
||||
return DialEarlyContext(context.Background(), pconn, remoteAddr, host, tlsConf, config)
|
||||
}
|
||||
|
||||
// DialEarlyContext establishes a new 0-RTT QUIC connection to a server using a net.PacketConn using the provided context.
|
||||
// See DialEarly for details.
|
||||
func DialEarlyContext(
|
||||
ctx context.Context,
|
||||
pconn net.PacketConn,
|
||||
remoteAddr net.Addr,
|
||||
host string,
|
||||
tlsConf *tls.Config,
|
||||
config *Config,
|
||||
) (EarlyConnection, error) {
|
||||
return dialContext(ctx, pconn, remoteAddr, host, tlsConf, config, true, false)
|
||||
}
|
||||
|
||||
// DialContext establishes a new QUIC connection to a server using a net.PacketConn using the provided context.
|
||||
// See Dial for details.
|
||||
func DialContext(
|
||||
ctx context.Context,
|
||||
pconn net.PacketConn,
|
||||
remoteAddr net.Addr,
|
||||
host string,
|
||||
tlsConf *tls.Config,
|
||||
config *Config,
|
||||
) (Connection, error) {
|
||||
return dialContext(ctx, pconn, remoteAddr, host, tlsConf, config, false, false)
|
||||
}
|
||||
|
||||
func dialContext(
|
||||
ctx context.Context,
|
||||
pconn net.PacketConn,
|
||||
remoteAddr net.Addr,
|
||||
host string,
|
||||
tlsConf *tls.Config,
|
||||
config *Config,
|
||||
use0RTT bool,
|
||||
createdPacketConn bool,
|
||||
) (quicConn, error) {
|
||||
if tlsConf == nil {
|
||||
return nil, errors.New("quic: tls.Config not set")
|
||||
}
|
||||
if err := validateConfig(config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config = populateClientConfig(config, createdPacketConn)
|
||||
packetHandlers, err := getMultiplexer().AddConn(pconn, config.ConnectionIDGenerator.ConnectionIDLen(), config.StatelessResetKey, config.Tracer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := newClient(pconn, remoteAddr, config, tlsConf, host, use0RTT, createdPacketConn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c.packetHandlers = packetHandlers
|
||||
|
||||
c.tracingID = nextConnTracingID()
|
||||
if c.config.Tracer != nil {
|
||||
c.tracer = c.config.Tracer.TracerForConnection(
|
||||
context.WithValue(ctx, ConnectionTracingKey, c.tracingID),
|
||||
protocol.PerspectiveClient,
|
||||
c.destConnID,
|
||||
)
|
||||
}
|
||||
if c.tracer != nil {
|
||||
c.tracer.StartedConnection(c.sconn.LocalAddr(), c.sconn.RemoteAddr(), c.srcConnID, c.destConnID)
|
||||
}
|
||||
if err := c.dial(ctx); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return c.conn, nil
|
||||
}
|
||||
|
||||
func newClient(
|
||||
pconn net.PacketConn,
|
||||
remoteAddr net.Addr,
|
||||
config *Config,
|
||||
tlsConf *tls.Config,
|
||||
host string,
|
||||
use0RTT bool,
|
||||
createdPacketConn bool,
|
||||
) (*client, error) {
|
||||
if tlsConf == nil {
|
||||
tlsConf = &tls.Config{}
|
||||
} else {
|
||||
tlsConf = tlsConf.Clone()
|
||||
}
|
||||
if tlsConf.ServerName == "" {
|
||||
sni := host
|
||||
if strings.IndexByte(sni, ':') != -1 {
|
||||
var err error
|
||||
sni, _, err = net.SplitHostPort(sni)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
tlsConf.ServerName = sni
|
||||
}
|
||||
|
||||
// check that all versions are actually supported
|
||||
if config != nil {
|
||||
for _, v := range config.Versions {
|
||||
if !protocol.IsValidVersion(v) {
|
||||
return nil, fmt.Errorf("%s is not a valid QUIC version", v)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
srcConnID, err := config.ConnectionIDGenerator.GenerateConnectionID()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
destConnID, err := generateConnectionIDForInitial()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c := &client{
|
||||
srcConnID: srcConnID,
|
||||
destConnID: destConnID,
|
||||
sconn: newSendPconn(pconn, remoteAddr),
|
||||
createdPacketConn: createdPacketConn,
|
||||
use0RTT: use0RTT,
|
||||
tlsConf: tlsConf,
|
||||
config: config,
|
||||
version: config.Versions[0],
|
||||
handshakeChan: make(chan struct{}),
|
||||
logger: utils.DefaultLogger.WithPrefix("client"),
|
||||
}
|
||||
return c, nil
|
||||
}
|
||||
|
||||
func (c *client) dial(ctx context.Context) error {
|
||||
c.logger.Infof("Starting new connection to %s (%s -> %s), source connection ID %s, destination connection ID %s, version %s", c.tlsConf.ServerName, c.sconn.LocalAddr(), c.sconn.RemoteAddr(), c.srcConnID, c.destConnID, c.version)
|
||||
|
||||
c.conn = newClientConnection(
|
||||
c.sconn,
|
||||
c.packetHandlers,
|
||||
c.destConnID,
|
||||
c.srcConnID,
|
||||
c.config,
|
||||
c.tlsConf,
|
||||
c.initialPacketNumber,
|
||||
c.use0RTT,
|
||||
c.hasNegotiatedVersion,
|
||||
c.tracer,
|
||||
c.tracingID,
|
||||
c.logger,
|
||||
c.version,
|
||||
)
|
||||
c.packetHandlers.Add(c.srcConnID, c.conn)
|
||||
|
||||
errorChan := make(chan error, 1)
|
||||
go func() {
|
||||
err := c.conn.run() // returns as soon as the connection is closed
|
||||
|
||||
if e := (&errCloseForRecreating{}); !errors.As(err, &e) && c.createdPacketConn {
|
||||
c.packetHandlers.Destroy()
|
||||
}
|
||||
errorChan <- err
|
||||
}()
|
||||
|
||||
// only set when we're using 0-RTT
|
||||
// Otherwise, earlyConnChan will be nil. Receiving from a nil chan blocks forever.
|
||||
var earlyConnChan <-chan struct{}
|
||||
if c.use0RTT {
|
||||
earlyConnChan = c.conn.earlyConnReady()
|
||||
}
|
||||
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
c.conn.shutdown()
|
||||
return ctx.Err()
|
||||
case err := <-errorChan:
|
||||
var recreateErr *errCloseForRecreating
|
||||
if errors.As(err, &recreateErr) {
|
||||
c.initialPacketNumber = recreateErr.nextPacketNumber
|
||||
c.version = recreateErr.nextVersion
|
||||
c.hasNegotiatedVersion = true
|
||||
return c.dial(ctx)
|
||||
}
|
||||
return err
|
||||
case <-earlyConnChan:
|
||||
// ready to send 0-RTT data
|
||||
return nil
|
||||
case <-c.conn.HandshakeComplete().Done():
|
||||
// handshake successfully completed
|
||||
return nil
|
||||
}
|
||||
}
|
|
@ -1,112 +0,0 @@
|
|||
package quic
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
"github.com/lucas-clemente/quic-go/internal/utils"
|
||||
)
|
||||
|
||||
// A closedLocalConn is a connection that we closed locally.
|
||||
// When receiving packets for such a connection, we need to retransmit the packet containing the CONNECTION_CLOSE frame,
|
||||
// with an exponential backoff.
|
||||
type closedLocalConn struct {
|
||||
conn sendConn
|
||||
connClosePacket []byte
|
||||
|
||||
closeOnce sync.Once
|
||||
closeChan chan struct{} // is closed when the connection is closed or destroyed
|
||||
|
||||
receivedPackets chan *receivedPacket
|
||||
counter uint64 // number of packets received
|
||||
|
||||
perspective protocol.Perspective
|
||||
|
||||
logger utils.Logger
|
||||
}
|
||||
|
||||
var _ packetHandler = &closedLocalConn{}
|
||||
|
||||
// newClosedLocalConn creates a new closedLocalConn and runs it.
|
||||
func newClosedLocalConn(
|
||||
conn sendConn,
|
||||
connClosePacket []byte,
|
||||
perspective protocol.Perspective,
|
||||
logger utils.Logger,
|
||||
) packetHandler {
|
||||
s := &closedLocalConn{
|
||||
conn: conn,
|
||||
connClosePacket: connClosePacket,
|
||||
perspective: perspective,
|
||||
logger: logger,
|
||||
closeChan: make(chan struct{}),
|
||||
receivedPackets: make(chan *receivedPacket, 64),
|
||||
}
|
||||
go s.run()
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *closedLocalConn) run() {
|
||||
for {
|
||||
select {
|
||||
case p := <-s.receivedPackets:
|
||||
s.handlePacketImpl(p)
|
||||
case <-s.closeChan:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *closedLocalConn) handlePacket(p *receivedPacket) {
|
||||
select {
|
||||
case s.receivedPackets <- p:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
||||
func (s *closedLocalConn) handlePacketImpl(_ *receivedPacket) {
|
||||
s.counter++
|
||||
// exponential backoff
|
||||
// only send a CONNECTION_CLOSE for the 1st, 2nd, 4th, 8th, 16th, ... packet arriving
|
||||
for n := s.counter; n > 1; n = n / 2 {
|
||||
if n%2 != 0 {
|
||||
return
|
||||
}
|
||||
}
|
||||
s.logger.Debugf("Received %d packets after sending CONNECTION_CLOSE. Retransmitting.", s.counter)
|
||||
if err := s.conn.Write(s.connClosePacket); err != nil {
|
||||
s.logger.Debugf("Error retransmitting CONNECTION_CLOSE: %s", err)
|
||||
}
|
||||
}
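// isRetransmitPacket is an illustrative sketch (not upstream code): the
// halving loop above only falls through when the counter is an exact power of
// two, so a CONNECTION_CLOSE is retransmitted for the 1st, 2nd, 4th, 8th, ...
// received packet.
func isRetransmitPacket(n uint64) bool {
	return n != 0 && n&(n-1) == 0 // true exactly for powers of two
}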
|
||||
|
||||
func (s *closedLocalConn) shutdown() {
|
||||
s.destroy(nil)
|
||||
}
|
||||
|
||||
func (s *closedLocalConn) destroy(error) {
|
||||
s.closeOnce.Do(func() {
|
||||
close(s.closeChan)
|
||||
})
|
||||
}
|
||||
|
||||
func (s *closedLocalConn) getPerspective() protocol.Perspective {
|
||||
return s.perspective
|
||||
}
|
||||
|
||||
// A closedRemoteConn is a connection that was closed remotely.
|
||||
// For such a connection, we might receive reordered packets that were sent before the CONNECTION_CLOSE.
|
||||
// We can just ignore those packets.
|
||||
type closedRemoteConn struct {
|
||||
perspective protocol.Perspective
|
||||
}
|
||||
|
||||
var _ packetHandler = &closedRemoteConn{}
|
||||
|
||||
func newClosedRemoteConn(pers protocol.Perspective) packetHandler {
|
||||
return &closedRemoteConn{perspective: pers}
|
||||
}
|
||||
|
||||
func (s *closedRemoteConn) handlePacket(*receivedPacket) {}
|
||||
func (s *closedRemoteConn) shutdown() {}
|
||||
func (s *closedRemoteConn) destroy(error) {}
|
||||
func (s *closedRemoteConn) getPerspective() protocol.Perspective { return s.perspective }
|
|
@ -1,94 +0,0 @@
|
|||
package quic
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
"github.com/lucas-clemente/quic-go/internal/utils"
|
||||
"github.com/lucas-clemente/quic-go/internal/wire"
|
||||
)
|
||||
|
||||
type datagramQueue struct {
|
||||
sendQueue chan *wire.DatagramFrame
|
||||
rcvQueue chan []byte
|
||||
|
||||
closeErr error
|
||||
closed chan struct{}
|
||||
|
||||
hasData func()
|
||||
|
||||
dequeued chan error
|
||||
|
||||
logger utils.Logger
|
||||
}
|
||||
|
||||
func newDatagramQueue(hasData func(), logger utils.Logger) *datagramQueue {
|
||||
return &datagramQueue{
|
||||
hasData: hasData,
|
||||
sendQueue: make(chan *wire.DatagramFrame, 1),
|
||||
rcvQueue: make(chan []byte, protocol.DatagramRcvQueueLen),
|
||||
dequeued: make(chan error),
|
||||
closed: make(chan struct{}),
|
||||
logger: logger,
|
||||
}
|
||||
}
|
||||
|
||||
// AddAndWait queues a new DATAGRAM frame for sending.
|
||||
// It blocks until the frame has been dequeued.
|
||||
func (h *datagramQueue) AddAndWait(f *wire.DatagramFrame) error {
|
||||
select {
|
||||
case h.sendQueue <- f:
|
||||
h.hasData()
|
||||
case <-h.closed:
|
||||
return h.closeErr
|
||||
}
|
||||
|
||||
select {
|
||||
case err := <-h.dequeued:
|
||||
return err
|
||||
case <-h.closed:
|
||||
return h.closeErr
|
||||
}
|
||||
}
|
||||
|
||||
// Get dequeues a DATAGRAM frame for sending.
|
||||
func (h *datagramQueue) Get(maxDatagramSize protocol.ByteCount, version protocol.VersionNumber) *wire.DatagramFrame {
|
||||
select {
|
||||
case f := <-h.sendQueue:
|
||||
datagramSize := f.Length(version)
|
||||
if datagramSize > maxDatagramSize {
|
||||
h.dequeued <- fmt.Errorf("datagram size %d exceeds current limit of %d", datagramSize, maxDatagramSize)
|
||||
return nil
|
||||
}
|
||||
h.dequeued <- nil
|
||||
return f
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// HandleDatagramFrame handles a received DATAGRAM frame.
|
||||
func (h *datagramQueue) HandleDatagramFrame(f *wire.DatagramFrame) {
|
||||
data := make([]byte, len(f.Data))
|
||||
copy(data, f.Data)
|
||||
select {
|
||||
case h.rcvQueue <- data:
|
||||
default:
|
||||
h.logger.Debugf("Discarding DATAGRAM frame (%d bytes payload)", len(f.Data))
|
||||
}
|
||||
}
|
||||
|
||||
// Receive gets a received DATAGRAM frame.
|
||||
func (h *datagramQueue) Receive() ([]byte, error) {
|
||||
select {
|
||||
case data := <-h.rcvQueue:
|
||||
return data, nil
|
||||
case <-h.closed:
|
||||
return nil, h.closeErr
|
||||
}
|
||||
}
|
||||
|
||||
func (h *datagramQueue) CloseWithError(e error) {
|
||||
h.closeErr = e
|
||||
close(h.closed)
|
||||
}
|
|
@ -1,21 +0,0 @@
|
|||
package ackhandler
|
||||
|
||||
import (
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
"github.com/lucas-clemente/quic-go/internal/utils"
|
||||
"github.com/lucas-clemente/quic-go/logging"
|
||||
)
|
||||
|
||||
// NewAckHandler creates a new SentPacketHandler and a new ReceivedPacketHandler
|
||||
func NewAckHandler(
|
||||
initialPacketNumber protocol.PacketNumber,
|
||||
initialMaxDatagramSize protocol.ByteCount,
|
||||
rttStats *utils.RTTStats,
|
||||
pers protocol.Perspective,
|
||||
tracer logging.ConnectionTracer,
|
||||
logger utils.Logger,
|
||||
version protocol.VersionNumber,
|
||||
) (SentPacketHandler, ReceivedPacketHandler) {
|
||||
sph := newSentPacketHandler(initialPacketNumber, initialMaxDatagramSize, rttStats, pers, tracer, logger)
|
||||
return sph, newReceivedPacketHandler(sph, rttStats, logger, version)
|
||||
}
|
|
@ -1,9 +0,0 @@
|
|||
package ackhandler
|
||||
|
||||
import "github.com/lucas-clemente/quic-go/internal/wire"
|
||||
|
||||
type Frame struct {
|
||||
wire.Frame // nil if the frame has already been acknowledged in another packet
|
||||
OnLost func(wire.Frame)
|
||||
OnAcked func(wire.Frame)
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
package ackhandler
|
||||
|
||||
//go:generate genny -pkg ackhandler -in ../utils/linkedlist/linkedlist.go -out packet_linkedlist.go gen Item=Packet
|
|
@ -1,3 +0,0 @@
|
|||
package ackhandler
|
||||
|
||||
//go:generate sh -c "../../mockgen_private.sh ackhandler mock_sent_packet_tracker_test.go github.com/lucas-clemente/quic-go/internal/ackhandler sentPacketTracker"
|
vendor/github.com/lucas-clemente/quic-go/internal/ackhandler/packet_linkedlist.go (217 lines, generated, vendored)
|
@ -1,217 +0,0 @@
|
|||
// This file was automatically generated by genny.
|
||||
// Any changes will be lost if this file is regenerated.
|
||||
// see https://github.com/cheekybits/genny
|
||||
|
||||
package ackhandler
|
||||
|
||||
// Linked list implementation from the Go standard library.
|
||||
|
||||
// PacketElement is an element of a linked list.
|
||||
type PacketElement struct {
|
||||
// Next and previous pointers in the doubly-linked list of elements.
|
||||
// To simplify the implementation, internally a list l is implemented
|
||||
// as a ring, such that &l.root is both the next element of the last
|
||||
// list element (l.Back()) and the previous element of the first list
|
||||
// element (l.Front()).
|
||||
next, prev *PacketElement
|
||||
|
||||
// The list to which this element belongs.
|
||||
list *PacketList
|
||||
|
||||
// The value stored with this element.
|
||||
Value Packet
|
||||
}
|
||||
|
||||
// Next returns the next list element or nil.
|
||||
func (e *PacketElement) Next() *PacketElement {
|
||||
if p := e.next; e.list != nil && p != &e.list.root {
|
||||
return p
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Prev returns the previous list element or nil.
|
||||
func (e *PacketElement) Prev() *PacketElement {
|
||||
if p := e.prev; e.list != nil && p != &e.list.root {
|
||||
return p
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PacketList is a linked list of Packets.
|
||||
type PacketList struct {
|
||||
root PacketElement // sentinel list element, only &root, root.prev, and root.next are used
|
||||
len int // current list length excluding (this) sentinel element
|
||||
}
|
||||
|
||||
// Init initializes or clears list l.
|
||||
func (l *PacketList) Init() *PacketList {
|
||||
l.root.next = &l.root
|
||||
l.root.prev = &l.root
|
||||
l.len = 0
|
||||
return l
|
||||
}
|
||||
|
||||
// NewPacketList returns an initialized list.
|
||||
func NewPacketList() *PacketList { return new(PacketList).Init() }
|
||||
|
||||
// Len returns the number of elements of list l.
|
||||
// The complexity is O(1).
|
||||
func (l *PacketList) Len() int { return l.len }
|
||||
|
||||
// Front returns the first element of list l or nil if the list is empty.
|
||||
func (l *PacketList) Front() *PacketElement {
|
||||
if l.len == 0 {
|
||||
return nil
|
||||
}
|
||||
return l.root.next
|
||||
}
|
||||
|
||||
// Back returns the last element of list l or nil if the list is empty.
|
||||
func (l *PacketList) Back() *PacketElement {
|
||||
if l.len == 0 {
|
||||
return nil
|
||||
}
|
||||
return l.root.prev
|
||||
}
|
||||
|
||||
// lazyInit lazily initializes a zero List value.
|
||||
func (l *PacketList) lazyInit() {
|
||||
if l.root.next == nil {
|
||||
l.Init()
|
||||
}
|
||||
}
|
||||
|
||||
// insert inserts e after at, increments l.len, and returns e.
|
||||
func (l *PacketList) insert(e, at *PacketElement) *PacketElement {
|
||||
n := at.next
|
||||
at.next = e
|
||||
e.prev = at
|
||||
e.next = n
|
||||
n.prev = e
|
||||
e.list = l
|
||||
l.len++
|
||||
return e
|
||||
}
|
||||
|
||||
// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
|
||||
func (l *PacketList) insertValue(v Packet, at *PacketElement) *PacketElement {
|
||||
return l.insert(&PacketElement{Value: v}, at)
|
||||
}
|
||||
|
||||
// remove removes e from its list, decrements l.len, and returns e.
|
||||
func (l *PacketList) remove(e *PacketElement) *PacketElement {
|
||||
e.prev.next = e.next
|
||||
e.next.prev = e.prev
|
||||
e.next = nil // avoid memory leaks
|
||||
e.prev = nil // avoid memory leaks
|
||||
e.list = nil
|
||||
l.len--
|
||||
return e
|
||||
}
|
||||
|
||||
// Remove removes e from l if e is an element of list l.
|
||||
// It returns the element value e.Value.
|
||||
// The element must not be nil.
|
||||
func (l *PacketList) Remove(e *PacketElement) Packet {
|
||||
if e.list == l {
|
||||
// if e.list == l, l must have been initialized when e was inserted
|
||||
// in l or l == nil (e is a zero Element) and l.remove will crash
|
||||
l.remove(e)
|
||||
}
|
||||
return e.Value
|
||||
}
|
||||
|
||||
// PushFront inserts a new element e with value v at the front of list l and returns e.
|
||||
func (l *PacketList) PushFront(v Packet) *PacketElement {
|
||||
l.lazyInit()
|
||||
return l.insertValue(v, &l.root)
|
||||
}
|
||||
|
||||
// PushBack inserts a new element e with value v at the back of list l and returns e.
|
||||
func (l *PacketList) PushBack(v Packet) *PacketElement {
|
||||
l.lazyInit()
|
||||
return l.insertValue(v, l.root.prev)
|
||||
}
|
||||
|
||||
// InsertBefore inserts a new element e with value v immediately before mark and returns e.
|
||||
// If mark is not an element of l, the list is not modified.
|
||||
// The mark must not be nil.
|
||||
func (l *PacketList) InsertBefore(v Packet, mark *PacketElement) *PacketElement {
|
||||
if mark.list != l {
|
||||
return nil
|
||||
}
|
||||
// see comment in List.Remove about initialization of l
|
||||
return l.insertValue(v, mark.prev)
|
||||
}
|
||||
|
||||
// InsertAfter inserts a new element e with value v immediately after mark and returns e.
|
||||
// If mark is not an element of l, the list is not modified.
|
||||
// The mark must not be nil.
|
||||
func (l *PacketList) InsertAfter(v Packet, mark *PacketElement) *PacketElement {
|
||||
if mark.list != l {
|
||||
return nil
|
||||
}
|
||||
// see comment in List.Remove about initialization of l
|
||||
return l.insertValue(v, mark)
|
||||
}
|
||||
|
||||
// MoveToFront moves element e to the front of list l.
|
||||
// If e is not an element of l, the list is not modified.
|
||||
// The element must not be nil.
|
||||
func (l *PacketList) MoveToFront(e *PacketElement) {
|
||||
if e.list != l || l.root.next == e {
|
||||
return
|
||||
}
|
||||
// see comment in List.Remove about initialization of l
|
||||
l.insert(l.remove(e), &l.root)
|
||||
}
|
||||
|
||||
// MoveToBack moves element e to the back of list l.
|
||||
// If e is not an element of l, the list is not modified.
|
||||
// The element must not be nil.
|
||||
func (l *PacketList) MoveToBack(e *PacketElement) {
|
||||
if e.list != l || l.root.prev == e {
|
||||
return
|
||||
}
|
||||
// see comment in List.Remove about initialization of l
|
||||
l.insert(l.remove(e), l.root.prev)
|
||||
}
|
||||
|
||||
// MoveBefore moves element e to its new position before mark.
|
||||
// If e or mark is not an element of l, or e == mark, the list is not modified.
|
||||
// The element and mark must not be nil.
|
||||
func (l *PacketList) MoveBefore(e, mark *PacketElement) {
|
||||
if e.list != l || e == mark || mark.list != l {
|
||||
return
|
||||
}
|
||||
l.insert(l.remove(e), mark.prev)
|
||||
}
|
||||
|
||||
// MoveAfter moves element e to its new position after mark.
|
||||
// If e or mark is not an element of l, or e == mark, the list is not modified.
|
||||
// The element and mark must not be nil.
|
||||
func (l *PacketList) MoveAfter(e, mark *PacketElement) {
|
||||
if e.list != l || e == mark || mark.list != l {
|
||||
return
|
||||
}
|
||||
l.insert(l.remove(e), mark)
|
||||
}
|
||||
|
||||
// PushBackList inserts a copy of another list at the back of list l.
|
||||
// The lists l and other may be the same. They must not be nil.
|
||||
func (l *PacketList) PushBackList(other *PacketList) {
|
||||
l.lazyInit()
|
||||
for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
|
||||
l.insertValue(e.Value, l.root.prev)
|
||||
}
|
||||
}
|
||||
|
||||
// PushFrontList inserts a copy of another list at the front of list l.
|
||||
// The lists l and other may be the same. They must not be nil.
|
||||
func (l *PacketList) PushFrontList(other *PacketList) {
|
||||
l.lazyInit()
|
||||
for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
|
||||
l.insertValue(e.Value, &l.root)
|
||||
}
|
||||
}
|
|
@ -1,3 +0,0 @@
|
|||
package handshake
|
||||
|
||||
//go:generate sh -c "../../mockgen_private.sh handshake mock_handshake_runner_test.go github.com/lucas-clemente/quic-go/internal/handshake handshakeRunner"
|
|
@ -1,62 +0,0 @@
|
|||
package handshake
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
)
|
||||
|
||||
var (
|
||||
oldRetryAEAD cipher.AEAD // used for QUIC draft versions up to 34
|
||||
retryAEAD cipher.AEAD // used for QUIC draft-34
|
||||
)
|
||||
|
||||
func init() {
|
||||
oldRetryAEAD = initAEAD([16]byte{0xcc, 0xce, 0x18, 0x7e, 0xd0, 0x9a, 0x09, 0xd0, 0x57, 0x28, 0x15, 0x5a, 0x6c, 0xb9, 0x6b, 0xe1})
|
||||
retryAEAD = initAEAD([16]byte{0xbe, 0x0c, 0x69, 0x0b, 0x9f, 0x66, 0x57, 0x5a, 0x1d, 0x76, 0x6b, 0x54, 0xe3, 0x68, 0xc8, 0x4e})
|
||||
}
|
||||
|
||||
func initAEAD(key [16]byte) cipher.AEAD {
|
||||
aes, err := aes.NewCipher(key[:])
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
aead, err := cipher.NewGCM(aes)
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return aead
|
||||
}
|
||||
|
||||
var (
|
||||
retryBuf bytes.Buffer
|
||||
retryMutex sync.Mutex
|
||||
oldRetryNonce = [12]byte{0xe5, 0x49, 0x30, 0xf9, 0x7f, 0x21, 0x36, 0xf0, 0x53, 0x0a, 0x8c, 0x1c}
|
||||
retryNonce = [12]byte{0x46, 0x15, 0x99, 0xd3, 0x5d, 0x63, 0x2b, 0xf2, 0x23, 0x98, 0x25, 0xbb}
|
||||
)
|
||||
|
||||
// GetRetryIntegrityTag calculates the integrity tag on a Retry packet
|
||||
func GetRetryIntegrityTag(retry []byte, origDestConnID protocol.ConnectionID, version protocol.VersionNumber) *[16]byte {
|
||||
retryMutex.Lock()
|
||||
retryBuf.WriteByte(uint8(origDestConnID.Len()))
|
||||
retryBuf.Write(origDestConnID.Bytes())
|
||||
retryBuf.Write(retry)
|
||||
|
||||
var tag [16]byte
|
||||
var sealed []byte
|
||||
if version != protocol.Version1 {
|
||||
sealed = oldRetryAEAD.Seal(tag[:0], oldRetryNonce[:], nil, retryBuf.Bytes())
|
||||
} else {
|
||||
sealed = retryAEAD.Seal(tag[:0], retryNonce[:], nil, retryBuf.Bytes())
|
||||
}
|
||||
if len(sealed) != 16 {
|
||||
panic(fmt.Sprintf("unexpected Retry integrity tag length: %d", len(sealed)))
|
||||
}
|
||||
retryBuf.Reset()
|
||||
retryMutex.Unlock()
|
||||
return &tag
|
||||
}
|
|
@ -1,81 +0,0 @@
|
|||
package protocol
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// A ConnectionID in QUIC
|
||||
type ConnectionID []byte
|
||||
|
||||
const maxConnectionIDLen = 20
|
||||
|
||||
// GenerateConnectionID generates a connection ID using cryptographic random
|
||||
func GenerateConnectionID(len int) (ConnectionID, error) {
|
||||
b := make([]byte, len)
|
||||
if _, err := rand.Read(b); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return ConnectionID(b), nil
|
||||
}
|
||||
|
||||
// GenerateConnectionIDForInitial generates a connection ID for the Initial packet.
|
||||
// It uses a length randomly chosen between 8 and 20 bytes.
|
||||
func GenerateConnectionIDForInitial() (ConnectionID, error) {
|
||||
r := make([]byte, 1)
|
||||
if _, err := rand.Read(r); err != nil {
|
||||
return nil, err
|
||||
}
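	// The modulus spans maxConnectionIDLen-MinConnectionIDLenInitial+1 possible
	// values, so len falls between MinConnectionIDLenInitial and
	// maxConnectionIDLen (8 and 20 bytes, per the doc comment above).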
|
||||
len := MinConnectionIDLenInitial + int(r[0])%(maxConnectionIDLen-MinConnectionIDLenInitial+1)
|
||||
return GenerateConnectionID(len)
|
||||
}
|
||||
|
||||
// ReadConnectionID reads a connection ID of length len from the given io.Reader.
|
||||
// It returns io.EOF if there are not enough bytes to read.
|
||||
func ReadConnectionID(r io.Reader, len int) (ConnectionID, error) {
|
||||
if len == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
c := make(ConnectionID, len)
|
||||
_, err := io.ReadFull(r, c)
|
||||
if err == io.ErrUnexpectedEOF {
|
||||
return nil, io.EOF
|
||||
}
|
||||
return c, err
|
||||
}
|
||||
|
||||
// Equal says if two connection IDs are equal
|
||||
func (c ConnectionID) Equal(other ConnectionID) bool {
|
||||
return bytes.Equal(c, other)
|
||||
}
|
||||
|
||||
// Len returns the length of the connection ID in bytes
|
||||
func (c ConnectionID) Len() int {
|
||||
return len(c)
|
||||
}
|
||||
|
||||
// Bytes returns the byte representation
|
||||
func (c ConnectionID) Bytes() []byte {
|
||||
return []byte(c)
|
||||
}
|
||||
|
||||
func (c ConnectionID) String() string {
|
||||
if c.Len() == 0 {
|
||||
return "(empty)"
|
||||
}
|
||||
return fmt.Sprintf("%x", c.Bytes())
|
||||
}
|
||||
|
||||
type DefaultConnectionIDGenerator struct {
|
||||
ConnLen int
|
||||
}
|
||||
|
||||
func (d *DefaultConnectionIDGenerator) GenerateConnectionID() ([]byte, error) {
|
||||
return GenerateConnectionID(d.ConnLen)
|
||||
}
|
||||
|
||||
func (d *DefaultConnectionIDGenerator) ConnectionIDLen() int {
|
||||
return d.ConnLen
|
||||
}
|
|
@ -1,100 +0,0 @@
|
|||
//go:build go1.16 && !go1.17
|
||||
// +build go1.16,!go1.17
|
||||
|
||||
package qtls
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/cipher"
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"unsafe"
|
||||
|
||||
"github.com/marten-seemann/qtls-go1-16"
|
||||
)
|
||||
|
||||
type (
|
||||
// Alert is a TLS alert
|
||||
Alert = qtls.Alert
|
||||
// A Certificate is qtls.Certificate.
|
||||
Certificate = qtls.Certificate
|
||||
// CertificateRequestInfo contains information about a certificate request.
|
||||
CertificateRequestInfo = qtls.CertificateRequestInfo
|
||||
// A CipherSuiteTLS13 is a cipher suite for TLS 1.3
|
||||
CipherSuiteTLS13 = qtls.CipherSuiteTLS13
|
||||
// ClientHelloInfo contains information about a ClientHello.
|
||||
ClientHelloInfo = qtls.ClientHelloInfo
|
||||
// ClientSessionCache is a cache used for session resumption.
|
||||
ClientSessionCache = qtls.ClientSessionCache
|
||||
// ClientSessionState is a state needed for session resumption.
|
||||
ClientSessionState = qtls.ClientSessionState
|
||||
// A Config is a qtls.Config.
|
||||
Config = qtls.Config
|
||||
// A Conn is a qtls.Conn.
|
||||
Conn = qtls.Conn
|
||||
// ConnectionState contains information about the state of the connection.
|
||||
ConnectionState = qtls.ConnectionStateWith0RTT
|
||||
// EncryptionLevel is the encryption level of a message.
|
||||
EncryptionLevel = qtls.EncryptionLevel
|
||||
// Extension is a TLS extension
|
||||
Extension = qtls.Extension
|
||||
// ExtraConfig is the qtls.ExtraConfig
|
||||
ExtraConfig = qtls.ExtraConfig
|
||||
// RecordLayer is a qtls RecordLayer.
|
||||
RecordLayer = qtls.RecordLayer
|
||||
)
|
||||
|
||||
const (
|
||||
// EncryptionHandshake is the Handshake encryption level
|
||||
EncryptionHandshake = qtls.EncryptionHandshake
|
||||
// Encryption0RTT is the 0-RTT encryption level
|
||||
Encryption0RTT = qtls.Encryption0RTT
|
||||
// EncryptionApplication is the application data encryption level
|
||||
EncryptionApplication = qtls.EncryptionApplication
|
||||
)
|
||||
|
||||
// AEADAESGCMTLS13 creates a new AES-GCM AEAD for TLS 1.3
|
||||
func AEADAESGCMTLS13(key, fixedNonce []byte) cipher.AEAD {
|
||||
return qtls.AEADAESGCMTLS13(key, fixedNonce)
|
||||
}
|
||||
|
||||
// Client returns a new TLS client side connection.
|
||||
func Client(conn net.Conn, config *Config, extraConfig *ExtraConfig) *Conn {
|
||||
return qtls.Client(conn, config, extraConfig)
|
||||
}
|
||||
|
||||
// Server returns a new TLS server side connection.
|
||||
func Server(conn net.Conn, config *Config, extraConfig *ExtraConfig) *Conn {
|
||||
return qtls.Server(conn, config, extraConfig)
|
||||
}
|
||||
|
||||
func GetConnectionState(conn *Conn) ConnectionState {
|
||||
return conn.ConnectionStateWith0RTT()
|
||||
}
|
||||
|
||||
// ToTLSConnectionState extracts the tls.ConnectionState
|
||||
func ToTLSConnectionState(cs ConnectionState) tls.ConnectionState {
|
||||
return cs.ConnectionState
|
||||
}
|
||||
|
||||
type cipherSuiteTLS13 struct {
|
||||
ID uint16
|
||||
KeyLen int
|
||||
AEAD func(key, fixedNonce []byte) cipher.AEAD
|
||||
Hash crypto.Hash
|
||||
}
|
||||
|
||||
//go:linkname cipherSuiteTLS13ByID github.com/marten-seemann/qtls-go1-16.cipherSuiteTLS13ByID
|
||||
func cipherSuiteTLS13ByID(id uint16) *cipherSuiteTLS13
|
||||
|
||||
// CipherSuiteTLS13ByID gets a TLS 1.3 cipher suite.
|
||||
func CipherSuiteTLS13ByID(id uint16) *CipherSuiteTLS13 {
|
||||
val := cipherSuiteTLS13ByID(id)
|
||||
cs := (*cipherSuiteTLS13)(unsafe.Pointer(val))
|
||||
return &qtls.CipherSuiteTLS13{
|
||||
ID: cs.ID,
|
||||
KeyLen: cs.KeyLen,
|
||||
AEAD: cs.AEAD,
|
||||
Hash: cs.Hash,
|
||||
}
|
||||
}
|
|
@ -1,100 +0,0 @@
|
|||
//go:build go1.18 && !go1.19
|
||||
// +build go1.18,!go1.19
|
||||
|
||||
package qtls
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/cipher"
|
||||
"crypto/tls"
|
||||
"net"
|
||||
"unsafe"
|
||||
|
||||
"github.com/marten-seemann/qtls-go1-18"
|
||||
)
|
||||
|
||||
type (
|
||||
// Alert is a TLS alert
|
||||
Alert = qtls.Alert
|
||||
// A Certificate is qtls.Certificate.
|
||||
Certificate = qtls.Certificate
|
||||
// CertificateRequestInfo contains information about a certificate request.
|
||||
CertificateRequestInfo = qtls.CertificateRequestInfo
|
||||
// A CipherSuiteTLS13 is a cipher suite for TLS 1.3
|
||||
CipherSuiteTLS13 = qtls.CipherSuiteTLS13
|
||||
// ClientHelloInfo contains information about a ClientHello.
|
||||
ClientHelloInfo = qtls.ClientHelloInfo
|
||||
// ClientSessionCache is a cache used for session resumption.
|
||||
ClientSessionCache = qtls.ClientSessionCache
|
||||
// ClientSessionState is a state needed for session resumption.
|
||||
ClientSessionState = qtls.ClientSessionState
|
||||
// A Config is a qtls.Config.
|
||||
Config = qtls.Config
|
||||
// A Conn is a qtls.Conn.
|
||||
Conn = qtls.Conn
|
||||
// ConnectionState contains information about the state of the connection.
|
||||
ConnectionState = qtls.ConnectionStateWith0RTT
|
||||
// EncryptionLevel is the encryption level of a message.
|
||||
EncryptionLevel = qtls.EncryptionLevel
|
||||
// Extension is a TLS extension
|
||||
Extension = qtls.Extension
|
||||
// ExtraConfig is the qtls.ExtraConfig
|
||||
ExtraConfig = qtls.ExtraConfig
|
||||
// RecordLayer is a qtls RecordLayer.
|
||||
RecordLayer = qtls.RecordLayer
|
||||
)
|
||||
|
||||
const (
|
||||
// EncryptionHandshake is the Handshake encryption level
|
||||
EncryptionHandshake = qtls.EncryptionHandshake
|
||||
// Encryption0RTT is the 0-RTT encryption level
|
||||
Encryption0RTT = qtls.Encryption0RTT
|
||||
// EncryptionApplication is the application data encryption level
|
||||
EncryptionApplication = qtls.EncryptionApplication
|
||||
)
|
||||
|
||||
// AEADAESGCMTLS13 creates a new AES-GCM AEAD for TLS 1.3
|
||||
func AEADAESGCMTLS13(key, fixedNonce []byte) cipher.AEAD {
|
||||
return qtls.AEADAESGCMTLS13(key, fixedNonce)
|
||||
}
|
||||
|
||||
// Client returns a new TLS client side connection.
|
||||
func Client(conn net.Conn, config *Config, extraConfig *ExtraConfig) *Conn {
|
||||
return qtls.Client(conn, config, extraConfig)
|
||||
}
|
||||
|
||||
// Server returns a new TLS server side connection.
|
||||
func Server(conn net.Conn, config *Config, extraConfig *ExtraConfig) *Conn {
|
||||
return qtls.Server(conn, config, extraConfig)
|
||||
}
|
||||
|
||||
func GetConnectionState(conn *Conn) ConnectionState {
|
||||
return conn.ConnectionStateWith0RTT()
|
||||
}
|
||||
|
||||
// ToTLSConnectionState extracts the tls.ConnectionState
|
||||
func ToTLSConnectionState(cs ConnectionState) tls.ConnectionState {
|
||||
return cs.ConnectionState
|
||||
}
|
||||
|
||||
type cipherSuiteTLS13 struct {
|
||||
ID uint16
|
||||
KeyLen int
|
||||
AEAD func(key, fixedNonce []byte) cipher.AEAD
|
||||
Hash crypto.Hash
|
||||
}
|
||||
|
||||
//go:linkname cipherSuiteTLS13ByID github.com/marten-seemann/qtls-go1-18.cipherSuiteTLS13ByID
|
||||
func cipherSuiteTLS13ByID(id uint16) *cipherSuiteTLS13
|
||||
|
||||
// CipherSuiteTLS13ByID gets a TLS 1.3 cipher suite.
|
||||
func CipherSuiteTLS13ByID(id uint16) *CipherSuiteTLS13 {
|
||||
val := cipherSuiteTLS13ByID(id)
|
||||
cs := (*cipherSuiteTLS13)(unsafe.Pointer(val))
|
||||
return &qtls.CipherSuiteTLS13{
|
||||
ID: cs.ID,
|
||||
KeyLen: cs.KeyLen,
|
||||
AEAD: cs.AEAD,
|
||||
Hash: cs.Hash,
|
||||
}
|
||||
}
|
|
@ -1,6 +0,0 @@
//go:build go1.20
// +build go1.20

package qtls

var _ int = "The version of quic-go you're using can't be built on Go 1.20 yet. For more details, please see https://github.com/lucas-clemente/quic-go/wiki/quic-go-and-Go-versions."
@ -1,7 +0,0 @@
//go:build (go1.9 || go1.10 || go1.11 || go1.12 || go1.13 || go1.14 || go1.15) && !go1.16
// +build go1.9 go1.10 go1.11 go1.12 go1.13 go1.14 go1.15
// +build !go1.16

package qtls

var _ int = "The version of quic-go you're using can't be built using outdated Go versions. For more details, please see https://github.com/lucas-clemente/quic-go/wiki/quic-go-and-Go-versions."
@ -1,22 +0,0 @@
package utils

import "sync/atomic"

// An AtomicBool is an atomic bool
type AtomicBool struct {
    v int32
}

// Set sets the value
func (a *AtomicBool) Set(value bool) {
    var n int32
    if value {
        n = 1
    }
    atomic.StoreInt32(&a.v, n)
}

// Get gets the value
func (a *AtomicBool) Get() bool {
    return atomic.LoadInt32(&a.v) != 0
}
@ -1,5 +0,0 @@
package utils

//go:generate genny -pkg utils -in linkedlist/linkedlist.go -out byteinterval_linkedlist.go gen Item=ByteInterval
//go:generate genny -pkg utils -in linkedlist/linkedlist.go -out packetinterval_linkedlist.go gen Item=PacketInterval
//go:generate genny -pkg utils -in linkedlist/linkedlist.go -out newconnectionid_linkedlist.go gen Item=NewConnectionID
@ -1,170 +0,0 @@
|
|||
package utils
|
||||
|
||||
import (
|
||||
"math"
|
||||
"time"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
)
|
||||
|
||||
// InfDuration is a duration of infinite length
|
||||
const InfDuration = time.Duration(math.MaxInt64)
|
||||
|
||||
// Max returns the maximum of two Ints
|
||||
func Max(a, b int) int {
|
||||
if a < b {
|
||||
return b
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// MaxUint32 returns the maximum of two uint32
|
||||
func MaxUint32(a, b uint32) uint32 {
|
||||
if a < b {
|
||||
return b
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// MaxUint64 returns the maximum of two uint64
|
||||
func MaxUint64(a, b uint64) uint64 {
|
||||
if a < b {
|
||||
return b
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// MinUint64 returns the minimum of two uint64
|
||||
func MinUint64(a, b uint64) uint64 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// Min returns the minimum of two Ints
|
||||
func Min(a, b int) int {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// MinUint32 returns the minimum of two uint32
|
||||
func MinUint32(a, b uint32) uint32 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// MinInt64 returns the minimum of two int64
|
||||
func MinInt64(a, b int64) int64 {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// MaxInt64 returns the maximum of two int64
|
||||
func MaxInt64(a, b int64) int64 {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// MinByteCount returns the minimum of two ByteCounts
|
||||
func MinByteCount(a, b protocol.ByteCount) protocol.ByteCount {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// MaxByteCount returns the maximum of two ByteCounts
|
||||
func MaxByteCount(a, b protocol.ByteCount) protocol.ByteCount {
|
||||
if a < b {
|
||||
return b
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// MaxDuration returns the max duration
|
||||
func MaxDuration(a, b time.Duration) time.Duration {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// MinDuration returns the minimum duration
|
||||
func MinDuration(a, b time.Duration) time.Duration {
|
||||
if a > b {
|
||||
return b
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// MinNonZeroDuration returns the minimum duration that's not zero.
|
||||
func MinNonZeroDuration(a, b time.Duration) time.Duration {
|
||||
if a == 0 {
|
||||
return b
|
||||
}
|
||||
if b == 0 {
|
||||
return a
|
||||
}
|
||||
return MinDuration(a, b)
|
||||
}
|
||||
|
||||
// AbsDuration returns the absolute value of a time duration
|
||||
func AbsDuration(d time.Duration) time.Duration {
|
||||
if d >= 0 {
|
||||
return d
|
||||
}
|
||||
return -d
|
||||
}
|
||||
|
||||
// MinTime returns the earlier time
|
||||
func MinTime(a, b time.Time) time.Time {
|
||||
if a.After(b) {
|
||||
return b
|
||||
}
|
||||
return a
|
||||
}
|
||||
|
||||
// MinNonZeroTime returns the earliest time that is not time.Time{}
|
||||
// If both a and b are time.Time{}, it returns time.Time{}
|
||||
func MinNonZeroTime(a, b time.Time) time.Time {
|
||||
if a.IsZero() {
|
||||
return b
|
||||
}
|
||||
if b.IsZero() {
|
||||
return a
|
||||
}
|
||||
return MinTime(a, b)
|
||||
}
|
||||
|
||||
// MaxTime returns the later time
|
||||
func MaxTime(a, b time.Time) time.Time {
|
||||
if a.After(b) {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// MaxPacketNumber returns the max packet number
|
||||
func MaxPacketNumber(a, b protocol.PacketNumber) protocol.PacketNumber {
|
||||
if a > b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
// MinPacketNumber returns the min packet number
|
||||
func MinPacketNumber(a, b protocol.PacketNumber) protocol.PacketNumber {
|
||||
if a < b {
|
||||
return a
|
||||
}
|
||||
return b
|
||||
}
|
|
@ -1,12 +0,0 @@
package utils

import (
    "github.com/lucas-clemente/quic-go/internal/protocol"
)

// NewConnectionID is a new connection ID
type NewConnectionID struct {
    SequenceNumber      uint64
    ConnectionID        protocol.ConnectionID
    StatelessResetToken protocol.StatelessResetToken
}
217
vendor/github.com/lucas-clemente/quic-go/internal/utils/newconnectionid_linkedlist.go
generated
vendored
|
@ -1,217 +0,0 @@
|
|||
// This file was automatically generated by genny.
|
||||
// Any changes will be lost if this file is regenerated.
|
||||
// see https://github.com/cheekybits/genny
|
||||
|
||||
package utils
|
||||
|
||||
// Linked list implementation from the Go standard library.
|
||||
|
||||
// NewConnectionIDElement is an element of a linked list.
|
||||
type NewConnectionIDElement struct {
|
||||
// Next and previous pointers in the doubly-linked list of elements.
|
||||
// To simplify the implementation, internally a list l is implemented
|
||||
// as a ring, such that &l.root is both the next element of the last
|
||||
// list element (l.Back()) and the previous element of the first list
|
||||
// element (l.Front()).
|
||||
next, prev *NewConnectionIDElement
|
||||
|
||||
// The list to which this element belongs.
|
||||
list *NewConnectionIDList
|
||||
|
||||
// The value stored with this element.
|
||||
Value NewConnectionID
|
||||
}
|
||||
|
||||
// Next returns the next list element or nil.
|
||||
func (e *NewConnectionIDElement) Next() *NewConnectionIDElement {
|
||||
if p := e.next; e.list != nil && p != &e.list.root {
|
||||
return p
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Prev returns the previous list element or nil.
|
||||
func (e *NewConnectionIDElement) Prev() *NewConnectionIDElement {
|
||||
if p := e.prev; e.list != nil && p != &e.list.root {
|
||||
return p
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// NewConnectionIDList is a linked list of NewConnectionIDs.
|
||||
type NewConnectionIDList struct {
|
||||
root NewConnectionIDElement // sentinel list element, only &root, root.prev, and root.next are used
|
||||
len int // current list length excluding (this) sentinel element
|
||||
}
|
||||
|
||||
// Init initializes or clears list l.
|
||||
func (l *NewConnectionIDList) Init() *NewConnectionIDList {
|
||||
l.root.next = &l.root
|
||||
l.root.prev = &l.root
|
||||
l.len = 0
|
||||
return l
|
||||
}
|
||||
|
||||
// NewNewConnectionIDList returns an initialized list.
|
||||
func NewNewConnectionIDList() *NewConnectionIDList { return new(NewConnectionIDList).Init() }
|
||||
|
||||
// Len returns the number of elements of list l.
|
||||
// The complexity is O(1).
|
||||
func (l *NewConnectionIDList) Len() int { return l.len }
|
||||
|
||||
// Front returns the first element of list l or nil if the list is empty.
|
||||
func (l *NewConnectionIDList) Front() *NewConnectionIDElement {
|
||||
if l.len == 0 {
|
||||
return nil
|
||||
}
|
||||
return l.root.next
|
||||
}
|
||||
|
||||
// Back returns the last element of list l or nil if the list is empty.
|
||||
func (l *NewConnectionIDList) Back() *NewConnectionIDElement {
|
||||
if l.len == 0 {
|
||||
return nil
|
||||
}
|
||||
return l.root.prev
|
||||
}
|
||||
|
||||
// lazyInit lazily initializes a zero List value.
|
||||
func (l *NewConnectionIDList) lazyInit() {
|
||||
if l.root.next == nil {
|
||||
l.Init()
|
||||
}
|
||||
}
|
||||
|
||||
// insert inserts e after at, increments l.len, and returns e.
|
||||
func (l *NewConnectionIDList) insert(e, at *NewConnectionIDElement) *NewConnectionIDElement {
|
||||
n := at.next
|
||||
at.next = e
|
||||
e.prev = at
|
||||
e.next = n
|
||||
n.prev = e
|
||||
e.list = l
|
||||
l.len++
|
||||
return e
|
||||
}
|
||||
|
||||
// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
|
||||
func (l *NewConnectionIDList) insertValue(v NewConnectionID, at *NewConnectionIDElement) *NewConnectionIDElement {
|
||||
return l.insert(&NewConnectionIDElement{Value: v}, at)
|
||||
}
|
||||
|
||||
// remove removes e from its list, decrements l.len, and returns e.
|
||||
func (l *NewConnectionIDList) remove(e *NewConnectionIDElement) *NewConnectionIDElement {
|
||||
e.prev.next = e.next
|
||||
e.next.prev = e.prev
|
||||
e.next = nil // avoid memory leaks
|
||||
e.prev = nil // avoid memory leaks
|
||||
e.list = nil
|
||||
l.len--
|
||||
return e
|
||||
}
|
||||
|
||||
// Remove removes e from l if e is an element of list l.
|
||||
// It returns the element value e.Value.
|
||||
// The element must not be nil.
|
||||
func (l *NewConnectionIDList) Remove(e *NewConnectionIDElement) NewConnectionID {
|
||||
if e.list == l {
|
||||
// if e.list == l, l must have been initialized when e was inserted
|
||||
// in l or l == nil (e is a zero Element) and l.remove will crash
|
||||
l.remove(e)
|
||||
}
|
||||
return e.Value
|
||||
}
|
||||
|
||||
// PushFront inserts a new element e with value v at the front of list l and returns e.
|
||||
func (l *NewConnectionIDList) PushFront(v NewConnectionID) *NewConnectionIDElement {
|
||||
l.lazyInit()
|
||||
return l.insertValue(v, &l.root)
|
||||
}
|
||||
|
||||
// PushBack inserts a new element e with value v at the back of list l and returns e.
|
||||
func (l *NewConnectionIDList) PushBack(v NewConnectionID) *NewConnectionIDElement {
|
||||
l.lazyInit()
|
||||
return l.insertValue(v, l.root.prev)
|
||||
}
|
||||
|
||||
// InsertBefore inserts a new element e with value v immediately before mark and returns e.
|
||||
// If mark is not an element of l, the list is not modified.
|
||||
// The mark must not be nil.
|
||||
func (l *NewConnectionIDList) InsertBefore(v NewConnectionID, mark *NewConnectionIDElement) *NewConnectionIDElement {
|
||||
if mark.list != l {
|
||||
return nil
|
||||
}
|
||||
// see comment in List.Remove about initialization of l
|
||||
return l.insertValue(v, mark.prev)
|
||||
}
|
||||
|
||||
// InsertAfter inserts a new element e with value v immediately after mark and returns e.
|
||||
// If mark is not an element of l, the list is not modified.
|
||||
// The mark must not be nil.
|
||||
func (l *NewConnectionIDList) InsertAfter(v NewConnectionID, mark *NewConnectionIDElement) *NewConnectionIDElement {
|
||||
if mark.list != l {
|
||||
return nil
|
||||
}
|
||||
// see comment in List.Remove about initialization of l
|
||||
return l.insertValue(v, mark)
|
||||
}
|
||||
|
||||
// MoveToFront moves element e to the front of list l.
|
||||
// If e is not an element of l, the list is not modified.
|
||||
// The element must not be nil.
|
||||
func (l *NewConnectionIDList) MoveToFront(e *NewConnectionIDElement) {
|
||||
if e.list != l || l.root.next == e {
|
||||
return
|
||||
}
|
||||
// see comment in List.Remove about initialization of l
|
||||
l.insert(l.remove(e), &l.root)
|
||||
}
|
||||
|
||||
// MoveToBack moves element e to the back of list l.
|
||||
// If e is not an element of l, the list is not modified.
|
||||
// The element must not be nil.
|
||||
func (l *NewConnectionIDList) MoveToBack(e *NewConnectionIDElement) {
|
||||
if e.list != l || l.root.prev == e {
|
||||
return
|
||||
}
|
||||
// see comment in List.Remove about initialization of l
|
||||
l.insert(l.remove(e), l.root.prev)
|
||||
}
|
||||
|
||||
// MoveBefore moves element e to its new position before mark.
|
||||
// If e or mark is not an element of l, or e == mark, the list is not modified.
|
||||
// The element and mark must not be nil.
|
||||
func (l *NewConnectionIDList) MoveBefore(e, mark *NewConnectionIDElement) {
|
||||
if e.list != l || e == mark || mark.list != l {
|
||||
return
|
||||
}
|
||||
l.insert(l.remove(e), mark.prev)
|
||||
}
|
||||
|
||||
// MoveAfter moves element e to its new position after mark.
|
||||
// If e or mark is not an element of l, or e == mark, the list is not modified.
|
||||
// The element and mark must not be nil.
|
||||
func (l *NewConnectionIDList) MoveAfter(e, mark *NewConnectionIDElement) {
|
||||
if e.list != l || e == mark || mark.list != l {
|
||||
return
|
||||
}
|
||||
l.insert(l.remove(e), mark)
|
||||
}
|
||||
|
||||
// PushBackList inserts a copy of another list at the back of list l.
|
||||
// The lists l and other may be the same. They must not be nil.
|
||||
func (l *NewConnectionIDList) PushBackList(other *NewConnectionIDList) {
|
||||
l.lazyInit()
|
||||
for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
|
||||
l.insertValue(e.Value, l.root.prev)
|
||||
}
|
||||
}
|
||||
|
||||
// PushFrontList inserts a copy of another list at the front of list l.
|
||||
// The lists l and other may be the same. They must not be nil.
|
||||
func (l *NewConnectionIDList) PushFrontList(other *NewConnectionIDList) {
|
||||
l.lazyInit()
|
||||
for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
|
||||
l.insertValue(e.Value, &l.root)
|
||||
}
|
||||
}
|
|
@ -1,9 +0,0 @@
package utils

import "github.com/lucas-clemente/quic-go/internal/protocol"

// PacketInterval is an interval from one PacketNumber to the other
type PacketInterval struct {
    Start protocol.PacketNumber
    End   protocol.PacketNumber
}
217
vendor/github.com/lucas-clemente/quic-go/internal/utils/packetinterval_linkedlist.go
generated
vendored
|
@ -1,217 +0,0 @@
|
|||
// This file was automatically generated by genny.
|
||||
// Any changes will be lost if this file is regenerated.
|
||||
// see https://github.com/cheekybits/genny
|
||||
|
||||
package utils
|
||||
|
||||
// Linked list implementation from the Go standard library.
|
||||
|
||||
// PacketIntervalElement is an element of a linked list.
|
||||
type PacketIntervalElement struct {
|
||||
// Next and previous pointers in the doubly-linked list of elements.
|
||||
// To simplify the implementation, internally a list l is implemented
|
||||
// as a ring, such that &l.root is both the next element of the last
|
||||
// list element (l.Back()) and the previous element of the first list
|
||||
// element (l.Front()).
|
||||
next, prev *PacketIntervalElement
|
||||
|
||||
// The list to which this element belongs.
|
||||
list *PacketIntervalList
|
||||
|
||||
// The value stored with this element.
|
||||
Value PacketInterval
|
||||
}
|
||||
|
||||
// Next returns the next list element or nil.
|
||||
func (e *PacketIntervalElement) Next() *PacketIntervalElement {
|
||||
if p := e.next; e.list != nil && p != &e.list.root {
|
||||
return p
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Prev returns the previous list element or nil.
|
||||
func (e *PacketIntervalElement) Prev() *PacketIntervalElement {
|
||||
if p := e.prev; e.list != nil && p != &e.list.root {
|
||||
return p
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// PacketIntervalList is a linked list of PacketIntervals.
|
||||
type PacketIntervalList struct {
|
||||
root PacketIntervalElement // sentinel list element, only &root, root.prev, and root.next are used
|
||||
len int // current list length excluding (this) sentinel element
|
||||
}
|
||||
|
||||
// Init initializes or clears list l.
|
||||
func (l *PacketIntervalList) Init() *PacketIntervalList {
|
||||
l.root.next = &l.root
|
||||
l.root.prev = &l.root
|
||||
l.len = 0
|
||||
return l
|
||||
}
|
||||
|
||||
// NewPacketIntervalList returns an initialized list.
|
||||
func NewPacketIntervalList() *PacketIntervalList { return new(PacketIntervalList).Init() }
|
||||
|
||||
// Len returns the number of elements of list l.
|
||||
// The complexity is O(1).
|
||||
func (l *PacketIntervalList) Len() int { return l.len }
|
||||
|
||||
// Front returns the first element of list l or nil if the list is empty.
|
||||
func (l *PacketIntervalList) Front() *PacketIntervalElement {
|
||||
if l.len == 0 {
|
||||
return nil
|
||||
}
|
||||
return l.root.next
|
||||
}
|
||||
|
||||
// Back returns the last element of list l or nil if the list is empty.
|
||||
func (l *PacketIntervalList) Back() *PacketIntervalElement {
|
||||
if l.len == 0 {
|
||||
return nil
|
||||
}
|
||||
return l.root.prev
|
||||
}
|
||||
|
||||
// lazyInit lazily initializes a zero List value.
|
||||
func (l *PacketIntervalList) lazyInit() {
|
||||
if l.root.next == nil {
|
||||
l.Init()
|
||||
}
|
||||
}
|
||||
|
||||
// insert inserts e after at, increments l.len, and returns e.
|
||||
func (l *PacketIntervalList) insert(e, at *PacketIntervalElement) *PacketIntervalElement {
|
||||
n := at.next
|
||||
at.next = e
|
||||
e.prev = at
|
||||
e.next = n
|
||||
n.prev = e
|
||||
e.list = l
|
||||
l.len++
|
||||
return e
|
||||
}
|
||||
|
||||
// insertValue is a convenience wrapper for insert(&Element{Value: v}, at).
|
||||
func (l *PacketIntervalList) insertValue(v PacketInterval, at *PacketIntervalElement) *PacketIntervalElement {
|
||||
return l.insert(&PacketIntervalElement{Value: v}, at)
|
||||
}
|
||||
|
||||
// remove removes e from its list, decrements l.len, and returns e.
|
||||
func (l *PacketIntervalList) remove(e *PacketIntervalElement) *PacketIntervalElement {
|
||||
e.prev.next = e.next
|
||||
e.next.prev = e.prev
|
||||
e.next = nil // avoid memory leaks
|
||||
e.prev = nil // avoid memory leaks
|
||||
e.list = nil
|
||||
l.len--
|
||||
return e
|
||||
}
|
||||
|
||||
// Remove removes e from l if e is an element of list l.
|
||||
// It returns the element value e.Value.
|
||||
// The element must not be nil.
|
||||
func (l *PacketIntervalList) Remove(e *PacketIntervalElement) PacketInterval {
|
||||
if e.list == l {
|
||||
// if e.list == l, l must have been initialized when e was inserted
|
||||
// in l or l == nil (e is a zero Element) and l.remove will crash
|
||||
l.remove(e)
|
||||
}
|
||||
return e.Value
|
||||
}
|
||||
|
||||
// PushFront inserts a new element e with value v at the front of list l and returns e.
|
||||
func (l *PacketIntervalList) PushFront(v PacketInterval) *PacketIntervalElement {
|
||||
l.lazyInit()
|
||||
return l.insertValue(v, &l.root)
|
||||
}
|
||||
|
||||
// PushBack inserts a new element e with value v at the back of list l and returns e.
|
||||
func (l *PacketIntervalList) PushBack(v PacketInterval) *PacketIntervalElement {
|
||||
l.lazyInit()
|
||||
return l.insertValue(v, l.root.prev)
|
||||
}
|
||||
|
||||
// InsertBefore inserts a new element e with value v immediately before mark and returns e.
|
||||
// If mark is not an element of l, the list is not modified.
|
||||
// The mark must not be nil.
|
||||
func (l *PacketIntervalList) InsertBefore(v PacketInterval, mark *PacketIntervalElement) *PacketIntervalElement {
|
||||
if mark.list != l {
|
||||
return nil
|
||||
}
|
||||
// see comment in List.Remove about initialization of l
|
||||
return l.insertValue(v, mark.prev)
|
||||
}
|
||||
|
||||
// InsertAfter inserts a new element e with value v immediately after mark and returns e.
|
||||
// If mark is not an element of l, the list is not modified.
|
||||
// The mark must not be nil.
|
||||
func (l *PacketIntervalList) InsertAfter(v PacketInterval, mark *PacketIntervalElement) *PacketIntervalElement {
|
||||
if mark.list != l {
|
||||
return nil
|
||||
}
|
||||
// see comment in List.Remove about initialization of l
|
||||
return l.insertValue(v, mark)
|
||||
}
|
||||
|
||||
// MoveToFront moves element e to the front of list l.
|
||||
// If e is not an element of l, the list is not modified.
|
||||
// The element must not be nil.
|
||||
func (l *PacketIntervalList) MoveToFront(e *PacketIntervalElement) {
|
||||
if e.list != l || l.root.next == e {
|
||||
return
|
||||
}
|
||||
// see comment in List.Remove about initialization of l
|
||||
l.insert(l.remove(e), &l.root)
|
||||
}
|
||||
|
||||
// MoveToBack moves element e to the back of list l.
|
||||
// If e is not an element of l, the list is not modified.
|
||||
// The element must not be nil.
|
||||
func (l *PacketIntervalList) MoveToBack(e *PacketIntervalElement) {
|
||||
if e.list != l || l.root.prev == e {
|
||||
return
|
||||
}
|
||||
// see comment in List.Remove about initialization of l
|
||||
l.insert(l.remove(e), l.root.prev)
|
||||
}
|
||||
|
||||
// MoveBefore moves element e to its new position before mark.
|
||||
// If e or mark is not an element of l, or e == mark, the list is not modified.
|
||||
// The element and mark must not be nil.
|
||||
func (l *PacketIntervalList) MoveBefore(e, mark *PacketIntervalElement) {
|
||||
if e.list != l || e == mark || mark.list != l {
|
||||
return
|
||||
}
|
||||
l.insert(l.remove(e), mark.prev)
|
||||
}
|
||||
|
||||
// MoveAfter moves element e to its new position after mark.
|
||||
// If e or mark is not an element of l, or e == mark, the list is not modified.
|
||||
// The element and mark must not be nil.
|
||||
func (l *PacketIntervalList) MoveAfter(e, mark *PacketIntervalElement) {
|
||||
if e.list != l || e == mark || mark.list != l {
|
||||
return
|
||||
}
|
||||
l.insert(l.remove(e), mark)
|
||||
}
|
||||
|
||||
// PushBackList inserts a copy of another list at the back of list l.
|
||||
// The lists l and other may be the same. They must not be nil.
|
||||
func (l *PacketIntervalList) PushBackList(other *PacketIntervalList) {
|
||||
l.lazyInit()
|
||||
for i, e := other.Len(), other.Front(); i > 0; i, e = i-1, e.Next() {
|
||||
l.insertValue(e.Value, l.root.prev)
|
||||
}
|
||||
}
|
||||
|
||||
// PushFrontList inserts a copy of another list at the front of list l.
|
||||
// The lists l and other may be the same. They must not be nil.
|
||||
func (l *PacketIntervalList) PushFrontList(other *PacketIntervalList) {
|
||||
l.lazyInit()
|
||||
for i, e := other.Len(), other.Back(); i > 0; i, e = i-1, e.Prev() {
|
||||
l.insertValue(e.Value, &l.root)
|
||||
}
|
||||
}
|
9
vendor/github.com/lucas-clemente/quic-go/internal/utils/streamframe_interval.go
generated
vendored
|
@ -1,9 +0,0 @@
package utils

import "github.com/lucas-clemente/quic-go/internal/protocol"

// ByteInterval is an interval from one ByteCount to the other
type ByteInterval struct {
    Start protocol.ByteCount
    End   protocol.ByteCount
}
@ -1,249 +0,0 @@
|
|||
package wire
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
"github.com/lucas-clemente/quic-go/internal/utils"
|
||||
"github.com/lucas-clemente/quic-go/quicvarint"
|
||||
)
|
||||
|
||||
// ErrInvalidReservedBits is returned when the reserved bits are incorrect.
|
||||
// When this error is returned, parsing continues, and an ExtendedHeader is returned.
|
||||
// This is necessary because we need to decrypt the packet in that case,
|
||||
// in order to avoid a timing side-channel.
|
||||
var ErrInvalidReservedBits = errors.New("invalid reserved bits")
|
||||
|
||||
// ExtendedHeader is the header of a QUIC packet.
|
||||
type ExtendedHeader struct {
|
||||
Header
|
||||
|
||||
typeByte byte
|
||||
|
||||
KeyPhase protocol.KeyPhaseBit
|
||||
|
||||
PacketNumberLen protocol.PacketNumberLen
|
||||
PacketNumber protocol.PacketNumber
|
||||
|
||||
parsedLen protocol.ByteCount
|
||||
}
|
||||
|
||||
func (h *ExtendedHeader) parse(b *bytes.Reader, v protocol.VersionNumber) (bool /* reserved bits valid */, error) {
|
||||
startLen := b.Len()
|
||||
// read the (now unencrypted) first byte
|
||||
var err error
|
||||
h.typeByte, err = b.ReadByte()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if _, err := b.Seek(int64(h.Header.ParsedLen())-1, io.SeekCurrent); err != nil {
|
||||
return false, err
|
||||
}
|
||||
var reservedBitsValid bool
|
||||
if h.IsLongHeader {
|
||||
reservedBitsValid, err = h.parseLongHeader(b, v)
|
||||
} else {
|
||||
reservedBitsValid, err = h.parseShortHeader(b, v)
|
||||
}
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
h.parsedLen = protocol.ByteCount(startLen - b.Len())
|
||||
return reservedBitsValid, err
|
||||
}
|
||||
|
||||
func (h *ExtendedHeader) parseLongHeader(b *bytes.Reader, _ protocol.VersionNumber) (bool /* reserved bits valid */, error) {
|
||||
if err := h.readPacketNumber(b); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if h.typeByte&0xc != 0 {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (h *ExtendedHeader) parseShortHeader(b *bytes.Reader, _ protocol.VersionNumber) (bool /* reserved bits valid */, error) {
|
||||
h.KeyPhase = protocol.KeyPhaseZero
|
||||
if h.typeByte&0x4 > 0 {
|
||||
h.KeyPhase = protocol.KeyPhaseOne
|
||||
}
|
||||
|
||||
if err := h.readPacketNumber(b); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if h.typeByte&0x18 != 0 {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func (h *ExtendedHeader) readPacketNumber(b *bytes.Reader) error {
|
||||
h.PacketNumberLen = protocol.PacketNumberLen(h.typeByte&0x3) + 1
|
||||
switch h.PacketNumberLen {
|
||||
case protocol.PacketNumberLen1:
|
||||
n, err := b.ReadByte()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.PacketNumber = protocol.PacketNumber(n)
|
||||
case protocol.PacketNumberLen2:
|
||||
n, err := utils.BigEndian.ReadUint16(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.PacketNumber = protocol.PacketNumber(n)
|
||||
case protocol.PacketNumberLen3:
|
||||
n, err := utils.BigEndian.ReadUint24(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.PacketNumber = protocol.PacketNumber(n)
|
||||
case protocol.PacketNumberLen4:
|
||||
n, err := utils.BigEndian.ReadUint32(b)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
h.PacketNumber = protocol.PacketNumber(n)
|
||||
default:
|
||||
return fmt.Errorf("invalid packet number length: %d", h.PacketNumberLen)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Write writes the Header.
|
||||
func (h *ExtendedHeader) Write(b *bytes.Buffer, ver protocol.VersionNumber) error {
|
||||
if h.DestConnectionID.Len() > protocol.MaxConnIDLen {
|
||||
return fmt.Errorf("invalid connection ID length: %d bytes", h.DestConnectionID.Len())
|
||||
}
|
||||
if h.SrcConnectionID.Len() > protocol.MaxConnIDLen {
|
||||
return fmt.Errorf("invalid connection ID length: %d bytes", h.SrcConnectionID.Len())
|
||||
}
|
||||
if h.IsLongHeader {
|
||||
return h.writeLongHeader(b, ver)
|
||||
}
|
||||
return h.writeShortHeader(b, ver)
|
||||
}
|
||||
|
||||
func (h *ExtendedHeader) writeLongHeader(b *bytes.Buffer, version protocol.VersionNumber) error {
|
||||
var packetType uint8
|
||||
if version == protocol.Version2 {
|
||||
//nolint:exhaustive
|
||||
switch h.Type {
|
||||
case protocol.PacketTypeInitial:
|
||||
packetType = 0b01
|
||||
case protocol.PacketType0RTT:
|
||||
packetType = 0b10
|
||||
case protocol.PacketTypeHandshake:
|
||||
packetType = 0b11
|
||||
case protocol.PacketTypeRetry:
|
||||
packetType = 0b00
|
||||
}
|
||||
} else {
|
||||
//nolint:exhaustive
|
||||
switch h.Type {
|
||||
case protocol.PacketTypeInitial:
|
||||
packetType = 0b00
|
||||
case protocol.PacketType0RTT:
|
||||
packetType = 0b01
|
||||
case protocol.PacketTypeHandshake:
|
||||
packetType = 0b10
|
||||
case protocol.PacketTypeRetry:
|
||||
packetType = 0b11
|
||||
}
|
||||
}
|
||||
firstByte := 0xc0 | packetType<<4
|
||||
if h.Type != protocol.PacketTypeRetry {
|
||||
// Retry packets don't have a packet number
|
||||
firstByte |= uint8(h.PacketNumberLen - 1)
|
||||
}
|
||||
|
||||
b.WriteByte(firstByte)
|
||||
utils.BigEndian.WriteUint32(b, uint32(h.Version))
|
||||
b.WriteByte(uint8(h.DestConnectionID.Len()))
|
||||
b.Write(h.DestConnectionID.Bytes())
|
||||
b.WriteByte(uint8(h.SrcConnectionID.Len()))
|
||||
b.Write(h.SrcConnectionID.Bytes())
|
||||
|
||||
//nolint:exhaustive
|
||||
switch h.Type {
|
||||
case protocol.PacketTypeRetry:
|
||||
b.Write(h.Token)
|
||||
return nil
|
||||
case protocol.PacketTypeInitial:
|
||||
quicvarint.Write(b, uint64(len(h.Token)))
|
||||
b.Write(h.Token)
|
||||
}
|
||||
quicvarint.WriteWithLen(b, uint64(h.Length), 2)
|
||||
return h.writePacketNumber(b)
|
||||
}
|
||||
|
||||
func (h *ExtendedHeader) writeShortHeader(b *bytes.Buffer, _ protocol.VersionNumber) error {
|
||||
typeByte := 0x40 | uint8(h.PacketNumberLen-1)
|
||||
if h.KeyPhase == protocol.KeyPhaseOne {
|
||||
typeByte |= byte(1 << 2)
|
||||
}
|
||||
|
||||
b.WriteByte(typeByte)
|
||||
b.Write(h.DestConnectionID.Bytes())
|
||||
return h.writePacketNumber(b)
|
||||
}
|
||||
|
||||
func (h *ExtendedHeader) writePacketNumber(b *bytes.Buffer) error {
|
||||
switch h.PacketNumberLen {
|
||||
case protocol.PacketNumberLen1:
|
||||
b.WriteByte(uint8(h.PacketNumber))
|
||||
case protocol.PacketNumberLen2:
|
||||
utils.BigEndian.WriteUint16(b, uint16(h.PacketNumber))
|
||||
case protocol.PacketNumberLen3:
|
||||
utils.BigEndian.WriteUint24(b, uint32(h.PacketNumber))
|
||||
case protocol.PacketNumberLen4:
|
||||
utils.BigEndian.WriteUint32(b, uint32(h.PacketNumber))
|
||||
default:
|
||||
return fmt.Errorf("invalid packet number length: %d", h.PacketNumberLen)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ParsedLen returns the number of bytes that were consumed when parsing the header
|
||||
func (h *ExtendedHeader) ParsedLen() protocol.ByteCount {
|
||||
return h.parsedLen
|
||||
}
|
||||
|
||||
// GetLength determines the length of the Header.
|
||||
func (h *ExtendedHeader) GetLength(v protocol.VersionNumber) protocol.ByteCount {
|
||||
if h.IsLongHeader {
|
||||
length := 1 /* type byte */ + 4 /* version */ + 1 /* dest conn ID len */ + protocol.ByteCount(h.DestConnectionID.Len()) + 1 /* src conn ID len */ + protocol.ByteCount(h.SrcConnectionID.Len()) + protocol.ByteCount(h.PacketNumberLen) + 2 /* length */
|
||||
if h.Type == protocol.PacketTypeInitial {
|
||||
length += quicvarint.Len(uint64(len(h.Token))) + protocol.ByteCount(len(h.Token))
|
||||
}
|
||||
return length
|
||||
}
|
||||
|
||||
length := protocol.ByteCount(1 /* type byte */ + h.DestConnectionID.Len())
|
||||
length += protocol.ByteCount(h.PacketNumberLen)
|
||||
return length
|
||||
}
|
||||
|
||||
// Log logs the Header
|
||||
func (h *ExtendedHeader) Log(logger utils.Logger) {
|
||||
if h.IsLongHeader {
|
||||
var token string
|
||||
if h.Type == protocol.PacketTypeInitial || h.Type == protocol.PacketTypeRetry {
|
||||
if len(h.Token) == 0 {
|
||||
token = "Token: (empty), "
|
||||
} else {
|
||||
token = fmt.Sprintf("Token: %#x, ", h.Token)
|
||||
}
|
||||
if h.Type == protocol.PacketTypeRetry {
|
||||
logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sVersion: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.Version)
|
||||
return
|
||||
}
|
||||
}
|
||||
logger.Debugf("\tLong Header{Type: %s, DestConnectionID: %s, SrcConnectionID: %s, %sPacketNumber: %d, PacketNumberLen: %d, Length: %d, Version: %s}", h.Type, h.DestConnectionID, h.SrcConnectionID, token, h.PacketNumber, h.PacketNumberLen, h.Length, h.Version)
|
||||
} else {
|
||||
logger.Debugf("\tShort Header{DestConnectionID: %s, PacketNumber: %d, PacketNumberLen: %d, KeyPhase: %s}", h.DestConnectionID, h.PacketNumber, h.PacketNumberLen, h.KeyPhase)
|
||||
}
|
||||
}
|
|
@ -1,143 +0,0 @@
|
|||
package wire
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"reflect"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
"github.com/lucas-clemente/quic-go/internal/qerr"
|
||||
)
|
||||
|
||||
type frameParser struct {
|
||||
ackDelayExponent uint8
|
||||
|
||||
supportsDatagrams bool
|
||||
|
||||
version protocol.VersionNumber
|
||||
}
|
||||
|
||||
// NewFrameParser creates a new frame parser.
|
||||
func NewFrameParser(supportsDatagrams bool, v protocol.VersionNumber) FrameParser {
|
||||
return &frameParser{
|
||||
supportsDatagrams: supportsDatagrams,
|
||||
version: v,
|
||||
}
|
||||
}
|
||||
|
||||
// ParseNext parses the next frame.
|
||||
// It skips PADDING frames.
|
||||
func (p *frameParser) ParseNext(r *bytes.Reader, encLevel protocol.EncryptionLevel) (Frame, error) {
|
||||
for r.Len() != 0 {
|
||||
typeByte, _ := r.ReadByte()
|
||||
if typeByte == 0x0 { // PADDING frame
|
||||
continue
|
||||
}
|
||||
r.UnreadByte()
|
||||
|
||||
f, err := p.parseFrame(r, typeByte, encLevel)
|
||||
if err != nil {
|
||||
return nil, &qerr.TransportError{
|
||||
FrameType: uint64(typeByte),
|
||||
ErrorCode: qerr.FrameEncodingError,
|
||||
ErrorMessage: err.Error(),
|
||||
}
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
func (p *frameParser) parseFrame(r *bytes.Reader, typeByte byte, encLevel protocol.EncryptionLevel) (Frame, error) {
|
||||
var frame Frame
|
||||
var err error
|
||||
if typeByte&0xf8 == 0x8 {
|
||||
frame, err = parseStreamFrame(r, p.version)
|
||||
} else {
|
||||
switch typeByte {
|
||||
case 0x1:
|
||||
frame, err = parsePingFrame(r, p.version)
|
||||
case 0x2, 0x3:
|
||||
ackDelayExponent := p.ackDelayExponent
|
||||
if encLevel != protocol.Encryption1RTT {
|
||||
ackDelayExponent = protocol.DefaultAckDelayExponent
|
||||
}
|
||||
frame, err = parseAckFrame(r, ackDelayExponent, p.version)
|
||||
case 0x4:
|
||||
frame, err = parseResetStreamFrame(r, p.version)
|
||||
case 0x5:
|
||||
frame, err = parseStopSendingFrame(r, p.version)
|
||||
case 0x6:
|
||||
frame, err = parseCryptoFrame(r, p.version)
|
||||
case 0x7:
|
||||
frame, err = parseNewTokenFrame(r, p.version)
|
||||
case 0x10:
|
||||
frame, err = parseMaxDataFrame(r, p.version)
|
||||
case 0x11:
|
||||
frame, err = parseMaxStreamDataFrame(r, p.version)
|
||||
case 0x12, 0x13:
|
||||
frame, err = parseMaxStreamsFrame(r, p.version)
|
||||
case 0x14:
|
||||
frame, err = parseDataBlockedFrame(r, p.version)
|
||||
case 0x15:
|
||||
frame, err = parseStreamDataBlockedFrame(r, p.version)
|
||||
case 0x16, 0x17:
|
||||
frame, err = parseStreamsBlockedFrame(r, p.version)
|
||||
case 0x18:
|
||||
frame, err = parseNewConnectionIDFrame(r, p.version)
|
||||
case 0x19:
|
||||
frame, err = parseRetireConnectionIDFrame(r, p.version)
|
||||
case 0x1a:
|
||||
frame, err = parsePathChallengeFrame(r, p.version)
|
||||
case 0x1b:
|
||||
frame, err = parsePathResponseFrame(r, p.version)
|
||||
case 0x1c, 0x1d:
|
||||
frame, err = parseConnectionCloseFrame(r, p.version)
|
||||
case 0x1e:
|
||||
frame, err = parseHandshakeDoneFrame(r, p.version)
|
||||
case 0x30, 0x31:
|
||||
if p.supportsDatagrams {
|
||||
frame, err = parseDatagramFrame(r, p.version)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
default:
|
||||
err = errors.New("unknown frame type")
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !p.isAllowedAtEncLevel(frame, encLevel) {
|
||||
return nil, fmt.Errorf("%s not allowed at encryption level %s", reflect.TypeOf(frame).Elem().Name(), encLevel)
|
||||
}
|
||||
return frame, nil
|
||||
}
|
||||
|
||||
func (p *frameParser) isAllowedAtEncLevel(f Frame, encLevel protocol.EncryptionLevel) bool {
|
||||
switch encLevel {
|
||||
case protocol.EncryptionInitial, protocol.EncryptionHandshake:
|
||||
switch f.(type) {
|
||||
case *CryptoFrame, *AckFrame, *ConnectionCloseFrame, *PingFrame:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
case protocol.Encryption0RTT:
|
||||
switch f.(type) {
|
||||
case *CryptoFrame, *AckFrame, *ConnectionCloseFrame, *NewTokenFrame, *PathResponseFrame, *RetireConnectionIDFrame:
|
||||
return false
|
||||
default:
|
||||
return true
|
||||
}
|
||||
case protocol.Encryption1RTT:
|
||||
return true
|
||||
default:
|
||||
panic("unknown encryption level")
|
||||
}
|
||||
}
|
||||
|
||||
func (p *frameParser) SetAckDelayExponent(exp uint8) {
|
||||
p.ackDelayExponent = exp
|
||||
}
|
28
vendor/github.com/lucas-clemente/quic-go/internal/wire/handshake_done_frame.go
generated
vendored
|
@ -1,28 +0,0 @@
|
|||
package wire
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
)
|
||||
|
||||
// A HandshakeDoneFrame is a HANDSHAKE_DONE frame
|
||||
type HandshakeDoneFrame struct{}
|
||||
|
||||
// parseHandshakeDoneFrame parses a HANDSHAKE_DONE frame
|
||||
func parseHandshakeDoneFrame(r *bytes.Reader, _ protocol.VersionNumber) (*HandshakeDoneFrame, error) {
|
||||
if _, err := r.ReadByte(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &HandshakeDoneFrame{}, nil
|
||||
}
|
||||
|
||||
func (f *HandshakeDoneFrame) Write(b *bytes.Buffer, _ protocol.VersionNumber) error {
|
||||
b.WriteByte(0x1e)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Length of a written frame
|
||||
func (f *HandshakeDoneFrame) Length(_ protocol.VersionNumber) protocol.ByteCount {
|
||||
return 1
|
||||
}
|
|
@ -1,27 +0,0 @@
|
|||
package wire
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
)
|
||||
|
||||
// A PingFrame is a PING frame
|
||||
type PingFrame struct{}
|
||||
|
||||
func parsePingFrame(r *bytes.Reader, _ protocol.VersionNumber) (*PingFrame, error) {
|
||||
if _, err := r.ReadByte(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &PingFrame{}, nil
|
||||
}
|
||||
|
||||
func (f *PingFrame) Write(b *bytes.Buffer, version protocol.VersionNumber) error {
|
||||
b.WriteByte(0x1)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Length of a written frame
|
||||
func (f *PingFrame) Length(version protocol.VersionNumber) protocol.ByteCount {
|
||||
return 1
|
||||
}
|
|
@ -1,4 +0,0 @@
package logging

//go:generate sh -c "mockgen -package logging -self_package github.com/lucas-clemente/quic-go/logging -destination mock_connection_tracer_test.go github.com/lucas-clemente/quic-go/logging ConnectionTracer"
//go:generate sh -c "mockgen -package logging -self_package github.com/lucas-clemente/quic-go/logging -destination mock_tracer_test.go github.com/lucas-clemente/quic-go/logging Tracer"
@ -1,27 +0,0 @@
|
|||
package quic
|
||||
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_send_conn_test.go github.com/lucas-clemente/quic-go sendConn"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_sender_test.go github.com/lucas-clemente/quic-go sender"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_stream_internal_test.go github.com/lucas-clemente/quic-go streamI"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_crypto_stream_test.go github.com/lucas-clemente/quic-go cryptoStream"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_receive_stream_internal_test.go github.com/lucas-clemente/quic-go receiveStreamI"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_send_stream_internal_test.go github.com/lucas-clemente/quic-go sendStreamI"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_stream_sender_test.go github.com/lucas-clemente/quic-go streamSender"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_stream_getter_test.go github.com/lucas-clemente/quic-go streamGetter"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_crypto_data_handler_test.go github.com/lucas-clemente/quic-go cryptoDataHandler"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_frame_source_test.go github.com/lucas-clemente/quic-go frameSource"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_ack_frame_source_test.go github.com/lucas-clemente/quic-go ackFrameSource"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_stream_manager_test.go github.com/lucas-clemente/quic-go streamManager"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_sealing_manager_test.go github.com/lucas-clemente/quic-go sealingManager"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_unpacker_test.go github.com/lucas-clemente/quic-go unpacker"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_packer_test.go github.com/lucas-clemente/quic-go packer"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_mtu_discoverer_test.go github.com/lucas-clemente/quic-go mtuDiscoverer"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_conn_runner_test.go github.com/lucas-clemente/quic-go connRunner"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_quic_conn_test.go github.com/lucas-clemente/quic-go quicConn"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_packet_handler_test.go github.com/lucas-clemente/quic-go packetHandler"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_unknown_packet_handler_test.go github.com/lucas-clemente/quic-go unknownPacketHandler"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_packet_handler_manager_test.go github.com/lucas-clemente/quic-go packetHandlerManager"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_multiplexer_test.go github.com/lucas-clemente/quic-go multiplexer"
|
||||
//go:generate sh -c "./mockgen_private.sh quic mock_batch_conn_test.go github.com/lucas-clemente/quic-go batchConn"
|
||||
//go:generate sh -c "mockgen -package quic -self_package github.com/lucas-clemente/quic-go -destination mock_token_store_test.go github.com/lucas-clemente/quic-go TokenStore"
|
||||
//go:generate sh -c "mockgen -package quic -self_package github.com/lucas-clemente/quic-go -destination mock_packetconn_test.go net PacketConn"
|
|
@ -1,49 +0,0 @@
|
|||
#!/bin/bash
|
||||
|
||||
DEST=$2
|
||||
PACKAGE=$3
|
||||
TMPFILE="mockgen_tmp.go"
|
||||
# uppercase the name of the interface
|
||||
ORIG_INTERFACE_NAME=$4
|
||||
INTERFACE_NAME="$(tr '[:lower:]' '[:upper:]' <<< ${ORIG_INTERFACE_NAME:0:1})${ORIG_INTERFACE_NAME:1}"
|
||||
|
||||
# Gather all files that contain interface definitions.
|
||||
# These interfaces might be used as embedded interfaces,
|
||||
# so we need to pass them to mockgen as aux_files.
|
||||
AUX=()
|
||||
for f in *.go; do
|
||||
if [[ -z ${f##*_test.go} ]]; then
|
||||
# skip test files
|
||||
continue;
|
||||
fi
|
||||
if $(egrep -qe "type (.*) interface" $f); then
|
||||
AUX+=("github.com/lucas-clemente/quic-go=$f")
|
||||
fi
|
||||
done
|
||||
|
||||
# Find the file that defines the interface we're mocking.
|
||||
for f in *.go; do
|
||||
if [[ -z ${f##*_test.go} ]]; then
|
||||
# skip test files
|
||||
continue;
|
||||
fi
|
||||
INTERFACE=$(sed -n "/^type $ORIG_INTERFACE_NAME interface/,/^}/p" $f)
|
||||
if [[ -n "$INTERFACE" ]]; then
|
||||
SRC=$f
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ -z "$INTERFACE" ]]; then
|
||||
echo "Interface $ORIG_INTERFACE_NAME not found."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
AUX_FILES=$(IFS=, ; echo "${AUX[*]}")
|
||||
|
||||
## create a public alias for the interface, so that mockgen can process it
|
||||
echo -e "package $1\n" > $TMPFILE
|
||||
echo "$INTERFACE" | sed "s/$ORIG_INTERFACE_NAME/$INTERFACE_NAME/" >> $TMPFILE
|
||||
mockgen -package $1 -self_package $3 -destination $DEST -source=$TMPFILE -aux_files $AUX_FILES
|
||||
sed "s/$TMPFILE/$SRC/" "$DEST" > "$DEST.new" && mv "$DEST.new" "$DEST"
|
||||
rm "$TMPFILE"
|
|
@ -1,107 +0,0 @@
|
|||
package quic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/utils"
|
||||
"github.com/lucas-clemente/quic-go/logging"
|
||||
)
|
||||
|
||||
var (
|
||||
connMuxerOnce sync.Once
|
||||
connMuxer multiplexer
|
||||
)
|
||||
|
||||
type indexableConn interface {
|
||||
LocalAddr() net.Addr
|
||||
}
|
||||
|
||||
type multiplexer interface {
|
||||
AddConn(c net.PacketConn, connIDLen int, statelessResetKey []byte, tracer logging.Tracer) (packetHandlerManager, error)
|
||||
RemoveConn(indexableConn) error
|
||||
}
|
||||
|
||||
type connManager struct {
|
||||
connIDLen int
|
||||
statelessResetKey []byte
|
||||
tracer logging.Tracer
|
||||
manager packetHandlerManager
|
||||
}
|
||||
|
||||
// The connMultiplexer listens on multiple net.PacketConns and dispatches
|
||||
// incoming packets to the connection handler.
|
||||
type connMultiplexer struct {
|
||||
mutex sync.Mutex
|
||||
|
||||
conns map[string] /* LocalAddr().String() */ connManager
|
||||
newPacketHandlerManager func(net.PacketConn, int, []byte, logging.Tracer, utils.Logger) (packetHandlerManager, error) // so it can be replaced in the tests
|
||||
|
||||
logger utils.Logger
|
||||
}
|
||||
|
||||
var _ multiplexer = &connMultiplexer{}
|
||||
|
||||
func getMultiplexer() multiplexer {
|
||||
connMuxerOnce.Do(func() {
|
||||
connMuxer = &connMultiplexer{
|
||||
conns: make(map[string]connManager),
|
||||
logger: utils.DefaultLogger.WithPrefix("muxer"),
|
||||
newPacketHandlerManager: newPacketHandlerMap,
|
||||
}
|
||||
})
|
||||
return connMuxer
|
||||
}
|
||||
|
||||
func (m *connMultiplexer) AddConn(
|
||||
c net.PacketConn,
|
||||
connIDLen int,
|
||||
statelessResetKey []byte,
|
||||
tracer logging.Tracer,
|
||||
) (packetHandlerManager, error) {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
addr := c.LocalAddr()
|
||||
connIndex := addr.Network() + " " + addr.String()
|
||||
p, ok := m.conns[connIndex]
|
||||
if !ok {
|
||||
manager, err := m.newPacketHandlerManager(c, connIDLen, statelessResetKey, tracer, m.logger)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
p = connManager{
|
||||
connIDLen: connIDLen,
|
||||
statelessResetKey: statelessResetKey,
|
||||
manager: manager,
|
||||
tracer: tracer,
|
||||
}
|
||||
m.conns[connIndex] = p
|
||||
} else {
|
||||
if p.connIDLen != connIDLen {
|
||||
return nil, fmt.Errorf("cannot use %d byte connection IDs on a connection that is already using %d byte connction IDs", connIDLen, p.connIDLen)
|
||||
}
|
||||
if statelessResetKey != nil && !bytes.Equal(p.statelessResetKey, statelessResetKey) {
|
||||
return nil, fmt.Errorf("cannot use different stateless reset keys on the same packet conn")
|
||||
}
|
||||
if tracer != p.tracer {
|
||||
return nil, fmt.Errorf("cannot use different tracers on the same packet conn")
|
||||
}
|
||||
}
|
||||
return p.manager, nil
|
||||
}
|
||||
|
||||
func (m *connMultiplexer) RemoveConn(c indexableConn) error {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
connIndex := c.LocalAddr().Network() + " " + c.LocalAddr().String()
|
||||
if _, ok := m.conns[connIndex]; !ok {
|
||||
return fmt.Errorf("cannote remove connection, connection is unknown")
|
||||
}
|
||||
|
||||
delete(m.conns, connIndex)
|
||||
return nil
|
||||
}
|
|
@ -1,489 +0,0 @@
|
|||
package quic
|
||||
|
||||
import (
|
||||
"crypto/hmac"
|
||||
"crypto/rand"
|
||||
"crypto/sha256"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
"github.com/lucas-clemente/quic-go/internal/utils"
|
||||
"github.com/lucas-clemente/quic-go/internal/wire"
|
||||
"github.com/lucas-clemente/quic-go/logging"
|
||||
)
|
||||
|
||||
type zeroRTTQueue struct {
|
||||
queue []*receivedPacket
|
||||
retireTimer *time.Timer
|
||||
}
|
||||
|
||||
var _ packetHandler = &zeroRTTQueue{}
|
||||
|
||||
func (h *zeroRTTQueue) handlePacket(p *receivedPacket) {
|
||||
if len(h.queue) < protocol.Max0RTTQueueLen {
|
||||
h.queue = append(h.queue, p)
|
||||
}
|
||||
}
|
||||
func (h *zeroRTTQueue) shutdown() {}
|
||||
func (h *zeroRTTQueue) destroy(error) {}
|
||||
func (h *zeroRTTQueue) getPerspective() protocol.Perspective { return protocol.PerspectiveClient }
|
||||
func (h *zeroRTTQueue) EnqueueAll(sess packetHandler) {
|
||||
for _, p := range h.queue {
|
||||
sess.handlePacket(p)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *zeroRTTQueue) Clear() {
|
||||
for _, p := range h.queue {
|
||||
p.buffer.Release()
|
||||
}
|
||||
}
|
||||
|
||||
// rawConn is a connection that allows reading of a receivedPacket.
|
||||
type rawConn interface {
|
||||
ReadPacket() (*receivedPacket, error)
|
||||
WritePacket(b []byte, addr net.Addr, oob []byte) (int, error)
|
||||
LocalAddr() net.Addr
|
||||
io.Closer
|
||||
}
|
||||
|
||||
type packetHandlerMapEntry struct {
|
||||
packetHandler packetHandler
|
||||
is0RTTQueue bool
|
||||
}
|
||||
|
||||
// The packetHandlerMap stores packetHandlers, identified by connection ID.
|
||||
// It is used:
|
||||
// * by the server to store connections
|
||||
// * when multiplexing outgoing connections to store clients
|
||||
type packetHandlerMap struct {
|
||||
mutex sync.Mutex
|
||||
|
||||
conn rawConn
|
||||
connIDLen int
|
||||
|
||||
handlers map[string] /* string(ConnectionID)*/ packetHandlerMapEntry
|
||||
resetTokens map[protocol.StatelessResetToken] /* stateless reset token */ packetHandler
|
||||
server unknownPacketHandler
|
||||
numZeroRTTEntries int
|
||||
|
||||
listening chan struct{} // is closed when listen returns
|
||||
closed bool
|
||||
|
||||
deleteRetiredConnsAfter time.Duration
|
||||
zeroRTTQueueDuration time.Duration
|
||||
|
||||
statelessResetEnabled bool
|
||||
statelessResetMutex sync.Mutex
|
||||
statelessResetHasher hash.Hash
|
||||
|
||||
tracer logging.Tracer
|
||||
logger utils.Logger
|
||||
}
|
||||
|
||||
var _ packetHandlerManager = &packetHandlerMap{}
|
||||
|
||||
func setReceiveBuffer(c net.PacketConn, logger utils.Logger) error {
|
||||
conn, ok := c.(interface{ SetReadBuffer(int) error })
|
||||
if !ok {
|
||||
return errors.New("connection doesn't allow setting of receive buffer size. Not a *net.UDPConn?")
|
||||
}
|
||||
size, err := inspectReadBuffer(c)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to determine receive buffer size: %w", err)
|
||||
}
|
||||
if size >= protocol.DesiredReceiveBufferSize {
|
||||
logger.Debugf("Conn has receive buffer of %d kiB (wanted: at least %d kiB)", size/1024, protocol.DesiredReceiveBufferSize/1024)
|
||||
return nil
|
||||
}
|
||||
if err := conn.SetReadBuffer(protocol.DesiredReceiveBufferSize); err != nil {
|
||||
return fmt.Errorf("failed to increase receive buffer size: %w", err)
|
||||
}
|
||||
newSize, err := inspectReadBuffer(c)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to determine receive buffer size: %w", err)
|
||||
}
|
||||
if newSize == size {
|
||||
return fmt.Errorf("failed to increase receive buffer size (wanted: %d kiB, got %d kiB)", protocol.DesiredReceiveBufferSize/1024, newSize/1024)
|
||||
}
|
||||
if newSize < protocol.DesiredReceiveBufferSize {
|
||||
return fmt.Errorf("failed to sufficiently increase receive buffer size (was: %d kiB, wanted: %d kiB, got: %d kiB)", size/1024, protocol.DesiredReceiveBufferSize/1024, newSize/1024)
|
||||
}
|
||||
logger.Debugf("Increased receive buffer size to %d kiB", newSize/1024)
|
||||
return nil
|
||||
}
|
||||
|
||||
// only print warnings about the UDP receive buffer size once
|
||||
var receiveBufferWarningOnce sync.Once
|
||||
|
||||
func newPacketHandlerMap(
|
||||
c net.PacketConn,
|
||||
connIDLen int,
|
||||
statelessResetKey []byte,
|
||||
tracer logging.Tracer,
|
||||
logger utils.Logger,
|
||||
) (packetHandlerManager, error) {
|
||||
if err := setReceiveBuffer(c, logger); err != nil {
|
||||
if !strings.Contains(err.Error(), "use of closed network connection") {
|
||||
receiveBufferWarningOnce.Do(func() {
|
||||
if disable, _ := strconv.ParseBool(os.Getenv("QUIC_GO_DISABLE_RECEIVE_BUFFER_WARNING")); disable {
|
||||
return
|
||||
}
|
||||
log.Printf("%s. See https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size for details.", err)
|
||||
})
|
||||
}
|
||||
}
|
||||
conn, err := wrapConn(c)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
m := &packetHandlerMap{
|
||||
conn: conn,
|
||||
connIDLen: connIDLen,
|
||||
listening: make(chan struct{}),
|
||||
handlers: make(map[string]packetHandlerMapEntry),
|
||||
resetTokens: make(map[protocol.StatelessResetToken]packetHandler),
|
||||
deleteRetiredConnsAfter: protocol.RetiredConnectionIDDeleteTimeout,
|
||||
zeroRTTQueueDuration: protocol.Max0RTTQueueingDuration,
|
||||
statelessResetEnabled: len(statelessResetKey) > 0,
|
||||
statelessResetHasher: hmac.New(sha256.New, statelessResetKey),
|
||||
tracer: tracer,
|
||||
logger: logger,
|
||||
}
|
||||
go m.listen()
|
||||
|
||||
if logger.Debug() {
|
||||
go m.logUsage()
|
||||
}
|
||||
return m, nil
|
||||
}
|
||||
|
||||
func (h *packetHandlerMap) logUsage() {
|
||||
ticker := time.NewTicker(2 * time.Second)
|
||||
var printedZero bool
|
||||
for {
|
||||
select {
|
||||
case <-h.listening:
|
||||
return
|
||||
case <-ticker.C:
|
||||
}
|
||||
|
||||
h.mutex.Lock()
|
||||
numHandlers := len(h.handlers)
|
||||
numTokens := len(h.resetTokens)
|
||||
h.mutex.Unlock()
|
||||
// If the number of tracked handlers and tokens is zero, only print it a single time.
|
||||
hasZero := numHandlers == 0 && numTokens == 0
|
||||
if !hasZero || (hasZero && !printedZero) {
|
||||
h.logger.Debugf("Tracking %d connection IDs and %d reset tokens.\n", numHandlers, numTokens)
|
||||
printedZero = false
|
||||
if hasZero {
|
||||
printedZero = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (h *packetHandlerMap) Add(id protocol.ConnectionID, handler packetHandler) bool /* was added */ {
|
||||
h.mutex.Lock()
|
||||
defer h.mutex.Unlock()
|
||||
|
||||
if _, ok := h.handlers[string(id)]; ok {
|
||||
h.logger.Debugf("Not adding connection ID %s, as it already exists.", id)
|
||||
return false
|
||||
}
|
||||
h.handlers[string(id)] = packetHandlerMapEntry{packetHandler: handler}
|
||||
h.logger.Debugf("Adding connection ID %s.", id)
|
||||
return true
|
||||
}
|
||||
|
||||
func (h *packetHandlerMap) AddWithConnID(clientDestConnID, newConnID protocol.ConnectionID, fn func() packetHandler) bool {
|
||||
h.mutex.Lock()
|
||||
defer h.mutex.Unlock()
|
||||
|
||||
var q *zeroRTTQueue
|
||||
if entry, ok := h.handlers[string(clientDestConnID)]; ok {
|
||||
if !entry.is0RTTQueue {
|
||||
h.logger.Debugf("Not adding connection ID %s for a new connection, as it already exists.", clientDestConnID)
|
||||
return false
|
||||
}
|
||||
q = entry.packetHandler.(*zeroRTTQueue)
|
||||
q.retireTimer.Stop()
|
||||
h.numZeroRTTEntries--
|
||||
if h.numZeroRTTEntries < 0 {
|
||||
panic("number of 0-RTT queues < 0")
|
||||
}
|
||||
}
|
||||
sess := fn()
|
||||
if q != nil {
|
||||
q.EnqueueAll(sess)
|
||||
}
|
||||
h.handlers[string(clientDestConnID)] = packetHandlerMapEntry{packetHandler: sess}
|
||||
h.handlers[string(newConnID)] = packetHandlerMapEntry{packetHandler: sess}
|
||||
h.logger.Debugf("Adding connection IDs %s and %s for a new connection.", clientDestConnID, newConnID)
|
||||
return true
|
||||
}
|
||||
|
||||
func (h *packetHandlerMap) Remove(id protocol.ConnectionID) {
|
||||
h.mutex.Lock()
|
||||
delete(h.handlers, string(id))
|
||||
h.mutex.Unlock()
|
||||
h.logger.Debugf("Removing connection ID %s.", id)
|
||||
}
|
||||
|
||||
func (h *packetHandlerMap) Retire(id protocol.ConnectionID) {
|
||||
h.logger.Debugf("Retiring connection ID %s in %s.", id, h.deleteRetiredConnsAfter)
|
||||
time.AfterFunc(h.deleteRetiredConnsAfter, func() {
|
||||
h.mutex.Lock()
|
||||
delete(h.handlers, string(id))
|
||||
h.mutex.Unlock()
|
||||
h.logger.Debugf("Removing connection ID %s after it has been retired.", id)
|
||||
})
|
||||
}
|
||||
|
||||
func (h *packetHandlerMap) ReplaceWithClosed(id protocol.ConnectionID, handler packetHandler) {
|
||||
h.mutex.Lock()
|
||||
h.handlers[string(id)] = packetHandlerMapEntry{packetHandler: handler}
|
||||
h.mutex.Unlock()
|
||||
h.logger.Debugf("Replacing connection for connection ID %s with a closed connection.", id)
|
||||
|
||||
time.AfterFunc(h.deleteRetiredConnsAfter, func() {
|
||||
h.mutex.Lock()
|
||||
handler.shutdown()
|
||||
delete(h.handlers, string(id))
|
||||
h.mutex.Unlock()
|
||||
h.logger.Debugf("Removing connection ID %s for a closed connection after it has been retired.", id)
|
||||
})
|
||||
}
|
||||
|
||||
func (h *packetHandlerMap) AddResetToken(token protocol.StatelessResetToken, handler packetHandler) {
|
||||
h.mutex.Lock()
|
||||
h.resetTokens[token] = handler
|
||||
h.mutex.Unlock()
|
||||
}
|
||||
|
||||
func (h *packetHandlerMap) RemoveResetToken(token protocol.StatelessResetToken) {
|
||||
h.mutex.Lock()
|
||||
delete(h.resetTokens, token)
|
||||
h.mutex.Unlock()
|
||||
}
|
||||
|
||||
func (h *packetHandlerMap) SetServer(s unknownPacketHandler) {
|
||||
h.mutex.Lock()
|
||||
h.server = s
|
||||
h.mutex.Unlock()
|
||||
}
|
||||
|
||||
func (h *packetHandlerMap) CloseServer() {
|
||||
h.mutex.Lock()
|
||||
if h.server == nil {
|
||||
h.mutex.Unlock()
|
||||
return
|
||||
}
|
||||
h.server = nil
|
||||
var wg sync.WaitGroup
|
||||
for _, entry := range h.handlers {
|
||||
if entry.packetHandler.getPerspective() == protocol.PerspectiveServer {
|
||||
wg.Add(1)
|
||||
go func(handler packetHandler) {
|
||||
// blocks until the CONNECTION_CLOSE has been sent and the run-loop has stopped
|
||||
handler.shutdown()
|
||||
wg.Done()
|
||||
}(entry.packetHandler)
|
||||
}
|
||||
}
|
||||
h.mutex.Unlock()
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
// Destroy closes the underlying connection and waits until listen() has returned.
|
||||
// It does not close active connections.
|
||||
func (h *packetHandlerMap) Destroy() error {
|
||||
if err := h.conn.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
<-h.listening // wait until listening returns
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *packetHandlerMap) close(e error) error {
|
||||
h.mutex.Lock()
|
||||
if h.closed {
|
||||
h.mutex.Unlock()
|
||||
return nil
|
||||
}
|
||||
|
||||
var wg sync.WaitGroup
|
||||
for _, entry := range h.handlers {
|
||||
wg.Add(1)
|
||||
go func(handler packetHandler) {
|
||||
handler.destroy(e)
|
||||
wg.Done()
|
||||
}(entry.packetHandler)
|
||||
}
|
||||
|
||||
if h.server != nil {
|
||||
h.server.setCloseError(e)
|
||||
}
|
||||
h.closed = true
|
||||
h.mutex.Unlock()
|
||||
wg.Wait()
|
||||
return getMultiplexer().RemoveConn(h.conn)
|
||||
}
|
||||
|
||||
func (h *packetHandlerMap) listen() {
|
||||
defer close(h.listening)
|
||||
for {
|
||||
p, err := h.conn.ReadPacket()
|
||||
//nolint:staticcheck // SA1019 ignore this!
|
||||
// TODO: This code is used to ignore wsa errors on Windows.
|
||||
// Since net.Error.Temporary is deprecated as of Go 1.18, we should find a better solution.
|
||||
// See https://github.com/lucas-clemente/quic-go/issues/1737 for details.
|
||||
if nerr, ok := err.(net.Error); ok && nerr.Temporary() {
|
||||
h.logger.Debugf("Temporary error reading from conn: %w", err)
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
h.close(err)
|
||||
return
|
||||
}
|
||||
h.handlePacket(p)
|
||||
}
|
||||
}
|
||||
|
||||
func (h *packetHandlerMap) handlePacket(p *receivedPacket) {
|
||||
connID, err := wire.ParseConnectionID(p.data, h.connIDLen)
|
||||
if err != nil {
|
||||
h.logger.Debugf("error parsing connection ID on packet from %s: %s", p.remoteAddr, err)
|
||||
if h.tracer != nil {
|
||||
h.tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropHeaderParseError)
|
||||
}
|
||||
p.buffer.MaybeRelease()
|
||||
return
|
||||
}
|
||||
|
||||
h.mutex.Lock()
|
||||
defer h.mutex.Unlock()
|
||||
|
||||
if isStatelessReset := h.maybeHandleStatelessReset(p.data); isStatelessReset {
|
||||
return
|
||||
}
|
||||
|
||||
if entry, ok := h.handlers[string(connID)]; ok {
|
||||
if entry.is0RTTQueue { // only enqueue 0-RTT packets in the 0-RTT queue
|
||||
if wire.Is0RTTPacket(p.data) {
|
||||
entry.packetHandler.handlePacket(p)
|
||||
return
|
||||
}
|
||||
} else { // existing connection
|
||||
entry.packetHandler.handlePacket(p)
|
||||
return
|
||||
}
|
||||
}
|
||||
if p.data[0]&0x80 == 0 {
|
||||
go h.maybeSendStatelessReset(p, connID)
|
||||
return
|
||||
}
|
||||
if h.server == nil { // no server set
|
||||
h.logger.Debugf("received a packet with an unexpected connection ID %s", connID)
|
||||
return
|
||||
}
|
||||
if wire.Is0RTTPacket(p.data) {
|
||||
if h.numZeroRTTEntries >= protocol.Max0RTTQueues {
|
||||
return
|
||||
}
|
||||
h.numZeroRTTEntries++
|
||||
queue := &zeroRTTQueue{queue: make([]*receivedPacket, 0, 8)}
|
||||
h.handlers[string(connID)] = packetHandlerMapEntry{
|
||||
packetHandler: queue,
|
||||
is0RTTQueue: true,
|
||||
}
|
||||
queue.retireTimer = time.AfterFunc(h.zeroRTTQueueDuration, func() {
|
||||
h.mutex.Lock()
|
||||
defer h.mutex.Unlock()
|
||||
// The entry might have been replaced by an actual connection.
|
||||
// Only delete it if it's still a 0-RTT queue.
|
||||
if entry, ok := h.handlers[string(connID)]; ok && entry.is0RTTQueue {
|
||||
delete(h.handlers, string(connID))
|
||||
h.numZeroRTTEntries--
|
||||
if h.numZeroRTTEntries < 0 {
|
||||
panic("number of 0-RTT queues < 0")
|
||||
}
|
||||
entry.packetHandler.(*zeroRTTQueue).Clear()
|
||||
if h.logger.Debug() {
|
||||
h.logger.Debugf("Removing 0-RTT queue for %s.", connID)
|
||||
}
|
||||
}
|
||||
})
|
||||
queue.handlePacket(p)
|
||||
return
|
||||
}
|
||||
h.server.handlePacket(p)
|
||||
}
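// Illustrative summary (not part of the original source) of the dispatch above:
//
//	known connection ID, live connection     -> deliver the packet to that connection
//	known connection ID, 0-RTT queue         -> buffer the packet only if it is a 0-RTT packet
//	unknown connection ID, short header      -> possibly answer with a stateless reset
//	unknown connection ID, 0-RTT long header -> create / fill a 0-RTT queue (capped at protocol.Max0RTTQueues)
//	unknown connection ID, other long header -> hand the packet to the server as a new connection attempt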
|
||||
|
||||
func (h *packetHandlerMap) maybeHandleStatelessReset(data []byte) bool {
|
||||
// stateless resets are always short header packets
|
||||
if data[0]&0x80 != 0 {
|
||||
return false
|
||||
}
|
||||
if len(data) < 17 /* type byte + 16 bytes for the reset token */ {
|
||||
return false
|
||||
}
|
||||
|
||||
var token protocol.StatelessResetToken
|
||||
copy(token[:], data[len(data)-16:])
|
||||
if sess, ok := h.resetTokens[token]; ok {
|
||||
h.logger.Debugf("Received a stateless reset with token %#x. Closing connection.", token)
|
||||
go sess.destroy(&StatelessResetError{Token: token})
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (h *packetHandlerMap) GetStatelessResetToken(connID protocol.ConnectionID) protocol.StatelessResetToken {
|
||||
var token protocol.StatelessResetToken
|
||||
if !h.statelessResetEnabled {
|
||||
// Return a random stateless reset token.
|
||||
// This token will be sent in the server's transport parameters.
|
||||
// By using a random token, an off-path attacker won't be able to disrupt the connection.
|
||||
rand.Read(token[:])
|
||||
return token
|
||||
}
|
||||
h.statelessResetMutex.Lock()
|
||||
h.statelessResetHasher.Write(connID.Bytes())
|
||||
copy(token[:], h.statelessResetHasher.Sum(nil))
|
||||
h.statelessResetHasher.Reset()
|
||||
h.statelessResetMutex.Unlock()
|
||||
return token
|
||||
}
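// Illustrative sketch (not part of the original source): with a stateless reset key
// configured, the token computed above is simply the first 16 bytes of
// HMAC-SHA256(statelessResetKey, connection ID), i.e. roughly:
//
//	mac := hmac.New(sha256.New, statelessResetKey)
//	mac.Write(connID.Bytes())
//	var token protocol.StatelessResetToken // a [16]byte
//	copy(token[:], mac.Sum(nil))
//
// which lets a stateless reset be generated for any connection ID without storing
// per-connection state.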
|
||||
|
||||
func (h *packetHandlerMap) maybeSendStatelessReset(p *receivedPacket, connID protocol.ConnectionID) {
|
||||
defer p.buffer.Release()
|
||||
if !h.statelessResetEnabled {
|
||||
return
|
||||
}
|
||||
// Don't send a stateless reset in response to very small packets.
|
||||
// This includes packets that could be stateless resets.
|
||||
if len(p.data) <= protocol.MinStatelessResetSize {
|
||||
return
|
||||
}
|
||||
token := h.GetStatelessResetToken(connID)
|
||||
h.logger.Debugf("Sending stateless reset to %s (connection ID: %s). Token: %#x", p.remoteAddr, connID, token)
|
||||
data := make([]byte, protocol.MinStatelessResetSize-16, protocol.MinStatelessResetSize)
|
||||
rand.Read(data)
|
||||
data[0] = (data[0] & 0x7f) | 0x40
|
||||
data = append(data, token[:]...)
|
||||
if _, err := h.conn.WritePacket(data, p.remoteAddr, p.info.OOB()); err != nil {
|
||||
h.logger.Debugf("Error sending Stateless Reset: %s", err)
|
||||
}
|
||||
}
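// Illustrative note (not part of the original source): the datagram assembled above is
//
//	byte 0:        random bits with the long-header bit cleared and the fixed bit set (0b01xxxxxx)
//	next bytes:    random padding, protocol.MinStatelessResetSize-16 random bytes in total
//	last 16 bytes: the HMAC-derived stateless reset token
//
// so on the wire it is indistinguishable from an ordinary short header packet.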
@ -1,894 +0,0 @@
package quic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"time"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/ackhandler"
|
||||
"github.com/lucas-clemente/quic-go/internal/handshake"
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
"github.com/lucas-clemente/quic-go/internal/qerr"
|
||||
"github.com/lucas-clemente/quic-go/internal/utils"
|
||||
"github.com/lucas-clemente/quic-go/internal/wire"
|
||||
)
|
||||
|
||||
type packer interface {
|
||||
PackCoalescedPacket() (*coalescedPacket, error)
|
||||
PackPacket() (*packedPacket, error)
|
||||
MaybePackProbePacket(protocol.EncryptionLevel) (*packedPacket, error)
|
||||
MaybePackAckPacket(handshakeConfirmed bool) (*packedPacket, error)
|
||||
PackConnectionClose(*qerr.TransportError) (*coalescedPacket, error)
|
||||
PackApplicationClose(*qerr.ApplicationError) (*coalescedPacket, error)
|
||||
|
||||
SetMaxPacketSize(protocol.ByteCount)
|
||||
PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount) (*packedPacket, error)
|
||||
|
||||
HandleTransportParameters(*wire.TransportParameters)
|
||||
SetToken([]byte)
|
||||
}
|
||||
|
||||
type sealer interface {
|
||||
handshake.LongHeaderSealer
|
||||
}
|
||||
|
||||
type payload struct {
|
||||
frames []ackhandler.Frame
|
||||
ack *wire.AckFrame
|
||||
length protocol.ByteCount
|
||||
}
|
||||
|
||||
type packedPacket struct {
|
||||
buffer *packetBuffer
|
||||
*packetContents
|
||||
}
|
||||
|
||||
type packetContents struct {
|
||||
header *wire.ExtendedHeader
|
||||
ack *wire.AckFrame
|
||||
frames []ackhandler.Frame
|
||||
|
||||
length protocol.ByteCount
|
||||
|
||||
isMTUProbePacket bool
|
||||
}
|
||||
|
||||
type coalescedPacket struct {
|
||||
buffer *packetBuffer
|
||||
packets []*packetContents
|
||||
}
|
||||
|
||||
func (p *packetContents) EncryptionLevel() protocol.EncryptionLevel {
|
||||
if !p.header.IsLongHeader {
|
||||
return protocol.Encryption1RTT
|
||||
}
|
||||
//nolint:exhaustive // Will never be called for Retry packets (and they don't have encrypted data).
|
||||
switch p.header.Type {
|
||||
case protocol.PacketTypeInitial:
|
||||
return protocol.EncryptionInitial
|
||||
case protocol.PacketTypeHandshake:
|
||||
return protocol.EncryptionHandshake
|
||||
case protocol.PacketType0RTT:
|
||||
return protocol.Encryption0RTT
|
||||
default:
|
||||
panic("can't determine encryption level")
|
||||
}
|
||||
}
|
||||
|
||||
func (p *packetContents) IsAckEliciting() bool {
|
||||
return ackhandler.HasAckElicitingFrames(p.frames)
|
||||
}
|
||||
|
||||
func (p *packetContents) ToAckHandlerPacket(now time.Time, q *retransmissionQueue) *ackhandler.Packet {
|
||||
largestAcked := protocol.InvalidPacketNumber
|
||||
if p.ack != nil {
|
||||
largestAcked = p.ack.LargestAcked()
|
||||
}
|
||||
encLevel := p.EncryptionLevel()
|
||||
for i := range p.frames {
|
||||
if p.frames[i].OnLost != nil {
|
||||
continue
|
||||
}
|
||||
switch encLevel {
|
||||
case protocol.EncryptionInitial:
|
||||
p.frames[i].OnLost = q.AddInitial
|
||||
case protocol.EncryptionHandshake:
|
||||
p.frames[i].OnLost = q.AddHandshake
|
||||
case protocol.Encryption0RTT, protocol.Encryption1RTT:
|
||||
p.frames[i].OnLost = q.AddAppData
|
||||
}
|
||||
}
|
||||
return &ackhandler.Packet{
|
||||
PacketNumber: p.header.PacketNumber,
|
||||
LargestAcked: largestAcked,
|
||||
Frames: p.frames,
|
||||
Length: p.length,
|
||||
EncryptionLevel: encLevel,
|
||||
SendTime: now,
|
||||
IsPathMTUProbePacket: p.isMTUProbePacket,
|
||||
}
|
||||
}
|
||||
|
||||
func getMaxPacketSize(addr net.Addr) protocol.ByteCount {
|
||||
maxSize := protocol.ByteCount(protocol.MinInitialPacketSize)
|
||||
// If this is not a UDP address, we don't know anything about the MTU.
|
||||
// Use the minimum size of an Initial packet as the max packet size.
|
||||
if udpAddr, ok := addr.(*net.UDPAddr); ok {
|
||||
if utils.IsIPv4(udpAddr.IP) {
|
||||
maxSize = protocol.InitialPacketSizeIPv4
|
||||
} else {
|
||||
maxSize = protocol.InitialPacketSizeIPv6
|
||||
}
|
||||
}
|
||||
return maxSize
|
||||
}
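// Worked example (illustrative, not part of the original source): for a *net.UDPAddr the
// result is protocol.InitialPacketSizeIPv4 or protocol.InitialPacketSizeIPv6 (to my
// understanding 1252 and 1232 bytes in this vendored version), while any other net.Addr,
// e.g. from a custom PacketConn, falls back to protocol.MinInitialPacketSize (1200 bytes,
// the minimum datagram size QUIC requires for Initial packets).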
|
||||
|
||||
type packetNumberManager interface {
|
||||
PeekPacketNumber(protocol.EncryptionLevel) (protocol.PacketNumber, protocol.PacketNumberLen)
|
||||
PopPacketNumber(protocol.EncryptionLevel) protocol.PacketNumber
|
||||
}
|
||||
|
||||
type sealingManager interface {
|
||||
GetInitialSealer() (handshake.LongHeaderSealer, error)
|
||||
GetHandshakeSealer() (handshake.LongHeaderSealer, error)
|
||||
Get0RTTSealer() (handshake.LongHeaderSealer, error)
|
||||
Get1RTTSealer() (handshake.ShortHeaderSealer, error)
|
||||
}
|
||||
|
||||
type frameSource interface {
|
||||
HasData() bool
|
||||
AppendStreamFrames([]ackhandler.Frame, protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount)
|
||||
AppendControlFrames([]ackhandler.Frame, protocol.ByteCount) ([]ackhandler.Frame, protocol.ByteCount)
|
||||
}
|
||||
|
||||
type ackFrameSource interface {
|
||||
GetAckFrame(encLevel protocol.EncryptionLevel, onlyIfQueued bool) *wire.AckFrame
|
||||
}
|
||||
|
||||
type packetPacker struct {
|
||||
srcConnID protocol.ConnectionID
|
||||
getDestConnID func() protocol.ConnectionID
|
||||
|
||||
perspective protocol.Perspective
|
||||
version protocol.VersionNumber
|
||||
cryptoSetup sealingManager
|
||||
|
||||
initialStream cryptoStream
|
||||
handshakeStream cryptoStream
|
||||
|
||||
token []byte
|
||||
|
||||
pnManager packetNumberManager
|
||||
framer frameSource
|
||||
acks ackFrameSource
|
||||
datagramQueue *datagramQueue
|
||||
retransmissionQueue *retransmissionQueue
|
||||
|
||||
maxPacketSize protocol.ByteCount
|
||||
numNonAckElicitingAcks int
|
||||
}
|
||||
|
||||
var _ packer = &packetPacker{}
|
||||
|
||||
func newPacketPacker(
|
||||
srcConnID protocol.ConnectionID,
|
||||
getDestConnID func() protocol.ConnectionID,
|
||||
initialStream cryptoStream,
|
||||
handshakeStream cryptoStream,
|
||||
packetNumberManager packetNumberManager,
|
||||
retransmissionQueue *retransmissionQueue,
|
||||
remoteAddr net.Addr, // only used for determining the max packet size
|
||||
cryptoSetup sealingManager,
|
||||
framer frameSource,
|
||||
acks ackFrameSource,
|
||||
datagramQueue *datagramQueue,
|
||||
perspective protocol.Perspective,
|
||||
version protocol.VersionNumber,
|
||||
) *packetPacker {
|
||||
return &packetPacker{
|
||||
cryptoSetup: cryptoSetup,
|
||||
getDestConnID: getDestConnID,
|
||||
srcConnID: srcConnID,
|
||||
initialStream: initialStream,
|
||||
handshakeStream: handshakeStream,
|
||||
retransmissionQueue: retransmissionQueue,
|
||||
datagramQueue: datagramQueue,
|
||||
perspective: perspective,
|
||||
version: version,
|
||||
framer: framer,
|
||||
acks: acks,
|
||||
pnManager: packetNumberManager,
|
||||
maxPacketSize: getMaxPacketSize(remoteAddr),
|
||||
}
|
||||
}
|
||||
|
||||
// PackConnectionClose packs a packet that closes the connection with a transport error.
|
||||
func (p *packetPacker) PackConnectionClose(e *qerr.TransportError) (*coalescedPacket, error) {
|
||||
var reason string
|
||||
// don't send details of crypto errors
|
||||
if !e.ErrorCode.IsCryptoError() {
|
||||
reason = e.ErrorMessage
|
||||
}
|
||||
return p.packConnectionClose(false, uint64(e.ErrorCode), e.FrameType, reason)
|
||||
}
|
||||
|
||||
// PackApplicationClose packs a packet that closes the connection with an application error.
|
||||
func (p *packetPacker) PackApplicationClose(e *qerr.ApplicationError) (*coalescedPacket, error) {
|
||||
return p.packConnectionClose(true, uint64(e.ErrorCode), 0, e.ErrorMessage)
|
||||
}
|
||||
|
||||
func (p *packetPacker) packConnectionClose(
|
||||
isApplicationError bool,
|
||||
errorCode uint64,
|
||||
frameType uint64,
|
||||
reason string,
|
||||
) (*coalescedPacket, error) {
|
||||
var sealers [4]sealer
|
||||
var hdrs [4]*wire.ExtendedHeader
|
||||
var payloads [4]*payload
|
||||
var size protocol.ByteCount
|
||||
var numPackets uint8
|
||||
encLevels := [4]protocol.EncryptionLevel{protocol.EncryptionInitial, protocol.EncryptionHandshake, protocol.Encryption0RTT, protocol.Encryption1RTT}
|
||||
for i, encLevel := range encLevels {
|
||||
if p.perspective == protocol.PerspectiveServer && encLevel == protocol.Encryption0RTT {
|
||||
continue
|
||||
}
|
||||
ccf := &wire.ConnectionCloseFrame{
|
||||
IsApplicationError: isApplicationError,
|
||||
ErrorCode: errorCode,
|
||||
FrameType: frameType,
|
||||
ReasonPhrase: reason,
|
||||
}
|
||||
// don't send application errors in Initial or Handshake packets
|
||||
if isApplicationError && (encLevel == protocol.EncryptionInitial || encLevel == protocol.EncryptionHandshake) {
|
||||
ccf.IsApplicationError = false
|
||||
ccf.ErrorCode = uint64(qerr.ApplicationErrorErrorCode)
|
||||
ccf.ReasonPhrase = ""
|
||||
}
|
||||
payload := &payload{
|
||||
frames: []ackhandler.Frame{{Frame: ccf}},
|
||||
length: ccf.Length(p.version),
|
||||
}
|
||||
|
||||
var sealer sealer
|
||||
var err error
|
||||
var keyPhase protocol.KeyPhaseBit // only set for 1-RTT
|
||||
switch encLevel {
|
||||
case protocol.EncryptionInitial:
|
||||
sealer, err = p.cryptoSetup.GetInitialSealer()
|
||||
case protocol.EncryptionHandshake:
|
||||
sealer, err = p.cryptoSetup.GetHandshakeSealer()
|
||||
case protocol.Encryption0RTT:
|
||||
sealer, err = p.cryptoSetup.Get0RTTSealer()
|
||||
case protocol.Encryption1RTT:
|
||||
var s handshake.ShortHeaderSealer
|
||||
s, err = p.cryptoSetup.Get1RTTSealer()
|
||||
if err == nil {
|
||||
keyPhase = s.KeyPhase()
|
||||
}
|
||||
sealer = s
|
||||
}
|
||||
if err == handshake.ErrKeysNotYetAvailable || err == handshake.ErrKeysDropped {
|
||||
continue
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sealers[i] = sealer
|
||||
var hdr *wire.ExtendedHeader
|
||||
if encLevel == protocol.Encryption1RTT {
|
||||
hdr = p.getShortHeader(keyPhase)
|
||||
} else {
|
||||
hdr = p.getLongHeader(encLevel)
|
||||
}
|
||||
hdrs[i] = hdr
|
||||
payloads[i] = payload
|
||||
size += p.packetLength(hdr, payload) + protocol.ByteCount(sealer.Overhead())
|
||||
numPackets++
|
||||
}
|
||||
contents := make([]*packetContents, 0, numPackets)
|
||||
buffer := getPacketBuffer()
|
||||
for i, encLevel := range encLevels {
|
||||
if sealers[i] == nil {
|
||||
continue
|
||||
}
|
||||
var paddingLen protocol.ByteCount
|
||||
if encLevel == protocol.EncryptionInitial {
|
||||
paddingLen = p.initialPaddingLen(payloads[i].frames, size)
|
||||
}
|
||||
c, err := p.appendPacket(buffer, hdrs[i], payloads[i], paddingLen, encLevel, sealers[i], false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
contents = append(contents, c)
|
||||
}
|
||||
return &coalescedPacket{buffer: buffer, packets: contents}, nil
|
||||
}
|
||||
|
||||
// packetLength calculates the length of the serialized packet.
|
||||
// It takes into account that packets that have a tiny payload need to be padded,
|
||||
// such that len(payload) + packet number len >= 4 + AEAD overhead
|
||||
func (p *packetPacker) packetLength(hdr *wire.ExtendedHeader, payload *payload) protocol.ByteCount {
|
||||
var paddingLen protocol.ByteCount
|
||||
pnLen := protocol.ByteCount(hdr.PacketNumberLen)
|
||||
if payload.length < 4-pnLen {
|
||||
paddingLen = 4 - pnLen - payload.length
|
||||
}
|
||||
return hdr.GetLength(p.version) + payload.length + paddingLen
|
||||
}
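// Worked example (illustrative, not part of the original source): with a 1-byte packet
// number and a 2-byte payload, 4 - 1 - 2 = 1 byte of padding is added, so that payload
// length plus packet number length reaches the 4 bytes required before the AEAD overhead
// is appended (see the comment above).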
|
||||
|
||||
func (p *packetPacker) MaybePackAckPacket(handshakeConfirmed bool) (*packedPacket, error) {
|
||||
var encLevel protocol.EncryptionLevel
|
||||
var ack *wire.AckFrame
|
||||
if !handshakeConfirmed {
|
||||
ack = p.acks.GetAckFrame(protocol.EncryptionInitial, true)
|
||||
if ack != nil {
|
||||
encLevel = protocol.EncryptionInitial
|
||||
} else {
|
||||
ack = p.acks.GetAckFrame(protocol.EncryptionHandshake, true)
|
||||
if ack != nil {
|
||||
encLevel = protocol.EncryptionHandshake
|
||||
}
|
||||
}
|
||||
}
|
||||
if ack == nil {
|
||||
ack = p.acks.GetAckFrame(protocol.Encryption1RTT, true)
|
||||
if ack == nil {
|
||||
return nil, nil
|
||||
}
|
||||
encLevel = protocol.Encryption1RTT
|
||||
}
|
||||
payload := &payload{
|
||||
ack: ack,
|
||||
length: ack.Length(p.version),
|
||||
}
|
||||
|
||||
sealer, hdr, err := p.getSealerAndHeader(encLevel)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return p.writeSinglePacket(hdr, payload, encLevel, sealer)
|
||||
}
|
||||
|
||||
// size is the expected size of the packet, if no padding was applied.
|
||||
func (p *packetPacker) initialPaddingLen(frames []ackhandler.Frame, size protocol.ByteCount) protocol.ByteCount {
|
||||
// For the server, only ack-eliciting Initial packets need to be padded.
|
||||
if p.perspective == protocol.PerspectiveServer && !ackhandler.HasAckElicitingFrames(frames) {
|
||||
return 0
|
||||
}
|
||||
if size >= p.maxPacketSize {
|
||||
return 0
|
||||
}
|
||||
return p.maxPacketSize - size
|
||||
}
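// Worked example (illustrative, not part of the original source): a client Initial whose
// frames add up to 300 bytes with maxPacketSize = 1200 receives 900 bytes of padding, so
// the datagram reaches the 1200-byte minimum required for Initial packets; a server
// Initial carrying no ack-eliciting frames is returned unpadded (0 bytes).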
|
||||
|
||||
// PackCoalescedPacket packs a new packet.
|
||||
// It packs an Initial / Handshake if there is data to send in these packet number spaces.
|
||||
// It should only be called before the handshake is confirmed.
|
||||
func (p *packetPacker) PackCoalescedPacket() (*coalescedPacket, error) {
|
||||
maxPacketSize := p.maxPacketSize
|
||||
if p.perspective == protocol.PerspectiveClient {
|
||||
maxPacketSize = protocol.MinInitialPacketSize
|
||||
}
|
||||
var initialHdr, handshakeHdr, appDataHdr *wire.ExtendedHeader
|
||||
var initialPayload, handshakePayload, appDataPayload *payload
|
||||
var numPackets int
|
||||
// Try packing an Initial packet.
|
||||
initialSealer, err := p.cryptoSetup.GetInitialSealer()
|
||||
if err != nil && err != handshake.ErrKeysDropped {
|
||||
return nil, err
|
||||
}
|
||||
var size protocol.ByteCount
|
||||
if initialSealer != nil {
|
||||
initialHdr, initialPayload = p.maybeGetCryptoPacket(maxPacketSize-protocol.ByteCount(initialSealer.Overhead()), size, protocol.EncryptionInitial)
|
||||
if initialPayload != nil {
|
||||
size += p.packetLength(initialHdr, initialPayload) + protocol.ByteCount(initialSealer.Overhead())
|
||||
numPackets++
|
||||
}
|
||||
}
|
||||
|
||||
// Add a Handshake packet.
|
||||
var handshakeSealer sealer
|
||||
if size < maxPacketSize-protocol.MinCoalescedPacketSize {
|
||||
var err error
|
||||
handshakeSealer, err = p.cryptoSetup.GetHandshakeSealer()
|
||||
if err != nil && err != handshake.ErrKeysDropped && err != handshake.ErrKeysNotYetAvailable {
|
||||
return nil, err
|
||||
}
|
||||
if handshakeSealer != nil {
|
||||
handshakeHdr, handshakePayload = p.maybeGetCryptoPacket(maxPacketSize-size-protocol.ByteCount(handshakeSealer.Overhead()), size, protocol.EncryptionHandshake)
|
||||
if handshakePayload != nil {
|
||||
s := p.packetLength(handshakeHdr, handshakePayload) + protocol.ByteCount(handshakeSealer.Overhead())
|
||||
size += s
|
||||
numPackets++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Add a 0-RTT / 1-RTT packet.
|
||||
var appDataSealer sealer
|
||||
appDataEncLevel := protocol.Encryption1RTT
|
||||
if size < maxPacketSize-protocol.MinCoalescedPacketSize {
|
||||
var err error
|
||||
appDataSealer, appDataHdr, appDataPayload = p.maybeGetAppDataPacket(maxPacketSize-size, size)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if appDataHdr != nil {
|
||||
if appDataHdr.IsLongHeader {
|
||||
appDataEncLevel = protocol.Encryption0RTT
|
||||
}
|
||||
if appDataPayload != nil {
|
||||
size += p.packetLength(appDataHdr, appDataPayload) + protocol.ByteCount(appDataSealer.Overhead())
|
||||
numPackets++
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if numPackets == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
buffer := getPacketBuffer()
|
||||
packet := &coalescedPacket{
|
||||
buffer: buffer,
|
||||
packets: make([]*packetContents, 0, numPackets),
|
||||
}
|
||||
if initialPayload != nil {
|
||||
padding := p.initialPaddingLen(initialPayload.frames, size)
|
||||
cont, err := p.appendPacket(buffer, initialHdr, initialPayload, padding, protocol.EncryptionInitial, initialSealer, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
packet.packets = append(packet.packets, cont)
|
||||
}
|
||||
if handshakePayload != nil {
|
||||
cont, err := p.appendPacket(buffer, handshakeHdr, handshakePayload, 0, protocol.EncryptionHandshake, handshakeSealer, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
packet.packets = append(packet.packets, cont)
|
||||
}
|
||||
if appDataPayload != nil {
|
||||
cont, err := p.appendPacket(buffer, appDataHdr, appDataPayload, 0, appDataEncLevel, appDataSealer, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
packet.packets = append(packet.packets, cont)
|
||||
}
|
||||
return packet, nil
|
||||
}
|
||||
|
||||
// PackPacket packs a packet in the application data packet number space.
|
||||
// It should be called after the handshake is confirmed.
|
||||
func (p *packetPacker) PackPacket() (*packedPacket, error) {
|
||||
sealer, hdr, payload := p.maybeGetAppDataPacket(p.maxPacketSize, 0)
|
||||
if payload == nil {
|
||||
return nil, nil
|
||||
}
|
||||
buffer := getPacketBuffer()
|
||||
encLevel := protocol.Encryption1RTT
|
||||
if hdr.IsLongHeader {
|
||||
encLevel = protocol.Encryption0RTT
|
||||
}
|
||||
cont, err := p.appendPacket(buffer, hdr, payload, 0, encLevel, sealer, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &packedPacket{
|
||||
buffer: buffer,
|
||||
packetContents: cont,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *packetPacker) maybeGetCryptoPacket(maxPacketSize, currentSize protocol.ByteCount, encLevel protocol.EncryptionLevel) (*wire.ExtendedHeader, *payload) {
|
||||
var s cryptoStream
|
||||
var hasRetransmission bool
|
||||
//nolint:exhaustive // Initial and Handshake are the only two encryption levels here.
|
||||
switch encLevel {
|
||||
case protocol.EncryptionInitial:
|
||||
s = p.initialStream
|
||||
hasRetransmission = p.retransmissionQueue.HasInitialData()
|
||||
case protocol.EncryptionHandshake:
|
||||
s = p.handshakeStream
|
||||
hasRetransmission = p.retransmissionQueue.HasHandshakeData()
|
||||
}
|
||||
|
||||
hasData := s.HasData()
|
||||
var ack *wire.AckFrame
|
||||
if encLevel == protocol.EncryptionInitial || currentSize == 0 {
|
||||
ack = p.acks.GetAckFrame(encLevel, !hasRetransmission && !hasData)
|
||||
}
|
||||
if !hasData && !hasRetransmission && ack == nil {
|
||||
// nothing to send
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
var payload payload
|
||||
if ack != nil {
|
||||
payload.ack = ack
|
||||
payload.length = ack.Length(p.version)
|
||||
maxPacketSize -= payload.length
|
||||
}
|
||||
hdr := p.getLongHeader(encLevel)
|
||||
maxPacketSize -= hdr.GetLength(p.version)
|
||||
if hasRetransmission {
|
||||
for {
|
||||
var f wire.Frame
|
||||
//nolint:exhaustive // 0-RTT packets can't contain any retransmissions.
|
||||
switch encLevel {
|
||||
case protocol.EncryptionInitial:
|
||||
f = p.retransmissionQueue.GetInitialFrame(maxPacketSize)
|
||||
case protocol.EncryptionHandshake:
|
||||
f = p.retransmissionQueue.GetHandshakeFrame(maxPacketSize)
|
||||
}
|
||||
if f == nil {
|
||||
break
|
||||
}
|
||||
payload.frames = append(payload.frames, ackhandler.Frame{Frame: f})
|
||||
frameLen := f.Length(p.version)
|
||||
payload.length += frameLen
|
||||
maxPacketSize -= frameLen
|
||||
}
|
||||
} else if s.HasData() {
|
||||
cf := s.PopCryptoFrame(maxPacketSize)
|
||||
payload.frames = []ackhandler.Frame{{Frame: cf}}
|
||||
payload.length += cf.Length(p.version)
|
||||
}
|
||||
return hdr, &payload
|
||||
}
|
||||
|
||||
func (p *packetPacker) maybeGetAppDataPacket(maxPacketSize, currentSize protocol.ByteCount) (sealer, *wire.ExtendedHeader, *payload) {
|
||||
var sealer sealer
|
||||
var encLevel protocol.EncryptionLevel
|
||||
var hdr *wire.ExtendedHeader
|
||||
oneRTTSealer, err := p.cryptoSetup.Get1RTTSealer()
|
||||
if err == nil {
|
||||
encLevel = protocol.Encryption1RTT
|
||||
sealer = oneRTTSealer
|
||||
hdr = p.getShortHeader(oneRTTSealer.KeyPhase())
|
||||
} else {
|
||||
// 1-RTT sealer not yet available
|
||||
if p.perspective != protocol.PerspectiveClient {
|
||||
return nil, nil, nil
|
||||
}
|
||||
sealer, err = p.cryptoSetup.Get0RTTSealer()
|
||||
if sealer == nil || err != nil {
|
||||
return nil, nil, nil
|
||||
}
|
||||
encLevel = protocol.Encryption0RTT
|
||||
hdr = p.getLongHeader(protocol.Encryption0RTT)
|
||||
}
|
||||
|
||||
maxPayloadSize := maxPacketSize - hdr.GetLength(p.version) - protocol.ByteCount(sealer.Overhead())
|
||||
payload := p.maybeGetAppDataPacketWithEncLevel(maxPayloadSize, encLevel == protocol.Encryption1RTT && currentSize == 0)
|
||||
return sealer, hdr, payload
|
||||
}
|
||||
|
||||
func (p *packetPacker) maybeGetAppDataPacketWithEncLevel(maxPayloadSize protocol.ByteCount, ackAllowed bool) *payload {
|
||||
payload := p.composeNextPacket(maxPayloadSize, ackAllowed)
|
||||
|
||||
// check if we have anything to send
|
||||
if len(payload.frames) == 0 {
|
||||
if payload.ack == nil {
|
||||
return nil
|
||||
}
|
||||
// the packet only contains an ACK
|
||||
if p.numNonAckElicitingAcks >= protocol.MaxNonAckElicitingAcks {
|
||||
ping := &wire.PingFrame{}
|
||||
// don't retransmit the PING frame when it is lost
|
||||
payload.frames = append(payload.frames, ackhandler.Frame{Frame: ping, OnLost: func(wire.Frame) {}})
|
||||
payload.length += ping.Length(p.version)
|
||||
p.numNonAckElicitingAcks = 0
|
||||
} else {
|
||||
p.numNonAckElicitingAcks++
|
||||
}
|
||||
} else {
|
||||
p.numNonAckElicitingAcks = 0
|
||||
}
|
||||
return payload
|
||||
}
|
||||
|
||||
func (p *packetPacker) composeNextPacket(maxFrameSize protocol.ByteCount, ackAllowed bool) *payload {
|
||||
payload := &payload{frames: make([]ackhandler.Frame, 0, 1)}
|
||||
|
||||
var hasDatagram bool
|
||||
if p.datagramQueue != nil {
|
||||
if datagram := p.datagramQueue.Get(maxFrameSize, p.version); datagram != nil {
|
||||
payload.frames = append(payload.frames, ackhandler.Frame{
|
||||
Frame: datagram,
|
||||
// set it to a no-op. Then we won't set the default callback, which would retransmit the frame.
|
||||
OnLost: func(wire.Frame) {},
|
||||
})
|
||||
payload.length += datagram.Length(p.version)
|
||||
hasDatagram = true
|
||||
}
|
||||
}
|
||||
|
||||
var ack *wire.AckFrame
|
||||
hasData := p.framer.HasData()
|
||||
hasRetransmission := p.retransmissionQueue.HasAppData()
|
||||
// TODO: make sure ACKs are sent when a lot of DATAGRAMs are queued
|
||||
if !hasDatagram && ackAllowed {
|
||||
ack = p.acks.GetAckFrame(protocol.Encryption1RTT, !hasRetransmission && !hasData)
|
||||
if ack != nil {
|
||||
payload.ack = ack
|
||||
payload.length += ack.Length(p.version)
|
||||
}
|
||||
}
|
||||
|
||||
if ack == nil && !hasData && !hasRetransmission {
|
||||
return payload
|
||||
}
|
||||
|
||||
if hasRetransmission {
|
||||
for {
|
||||
remainingLen := maxFrameSize - payload.length
|
||||
if remainingLen < protocol.MinStreamFrameSize {
|
||||
break
|
||||
}
|
||||
f := p.retransmissionQueue.GetAppDataFrame(remainingLen)
|
||||
if f == nil {
|
||||
break
|
||||
}
|
||||
payload.frames = append(payload.frames, ackhandler.Frame{Frame: f})
|
||||
payload.length += f.Length(p.version)
|
||||
}
|
||||
}
|
||||
|
||||
if hasData {
|
||||
var lengthAdded protocol.ByteCount
|
||||
payload.frames, lengthAdded = p.framer.AppendControlFrames(payload.frames, maxFrameSize-payload.length)
|
||||
payload.length += lengthAdded
|
||||
|
||||
payload.frames, lengthAdded = p.framer.AppendStreamFrames(payload.frames, maxFrameSize-payload.length)
|
||||
payload.length += lengthAdded
|
||||
}
|
||||
return payload
|
||||
}
|
||||
|
||||
func (p *packetPacker) MaybePackProbePacket(encLevel protocol.EncryptionLevel) (*packedPacket, error) {
|
||||
var hdr *wire.ExtendedHeader
|
||||
var payload *payload
|
||||
var sealer sealer
|
||||
//nolint:exhaustive // Probe packets are never sent for 0-RTT.
|
||||
switch encLevel {
|
||||
case protocol.EncryptionInitial:
|
||||
var err error
|
||||
sealer, err = p.cryptoSetup.GetInitialSealer()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr, payload = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), 0, protocol.EncryptionInitial)
|
||||
case protocol.EncryptionHandshake:
|
||||
var err error
|
||||
sealer, err = p.cryptoSetup.GetHandshakeSealer()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr, payload = p.maybeGetCryptoPacket(p.maxPacketSize-protocol.ByteCount(sealer.Overhead()), 0, protocol.EncryptionHandshake)
|
||||
case protocol.Encryption1RTT:
|
||||
oneRTTSealer, err := p.cryptoSetup.Get1RTTSealer()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sealer = oneRTTSealer
|
||||
hdr = p.getShortHeader(oneRTTSealer.KeyPhase())
|
||||
payload = p.maybeGetAppDataPacketWithEncLevel(p.maxPacketSize-protocol.ByteCount(sealer.Overhead())-hdr.GetLength(p.version), true)
|
||||
default:
|
||||
panic("unknown encryption level")
|
||||
}
|
||||
if payload == nil {
|
||||
return nil, nil
|
||||
}
|
||||
size := p.packetLength(hdr, payload) + protocol.ByteCount(sealer.Overhead())
|
||||
var padding protocol.ByteCount
|
||||
if encLevel == protocol.EncryptionInitial {
|
||||
padding = p.initialPaddingLen(payload.frames, size)
|
||||
}
|
||||
buffer := getPacketBuffer()
|
||||
cont, err := p.appendPacket(buffer, hdr, payload, padding, encLevel, sealer, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &packedPacket{
|
||||
buffer: buffer,
|
||||
packetContents: cont,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *packetPacker) PackMTUProbePacket(ping ackhandler.Frame, size protocol.ByteCount) (*packedPacket, error) {
|
||||
payload := &payload{
|
||||
frames: []ackhandler.Frame{ping},
|
||||
length: ping.Length(p.version),
|
||||
}
|
||||
buffer := getPacketBuffer()
|
||||
sealer, err := p.cryptoSetup.Get1RTTSealer()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
hdr := p.getShortHeader(sealer.KeyPhase())
|
||||
padding := size - p.packetLength(hdr, payload) - protocol.ByteCount(sealer.Overhead())
|
||||
contents, err := p.appendPacket(buffer, hdr, payload, padding, protocol.Encryption1RTT, sealer, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
contents.isMTUProbePacket = true
|
||||
return &packedPacket{
|
||||
buffer: buffer,
|
||||
packetContents: contents,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *packetPacker) getSealerAndHeader(encLevel protocol.EncryptionLevel) (sealer, *wire.ExtendedHeader, error) {
|
||||
switch encLevel {
|
||||
case protocol.EncryptionInitial:
|
||||
sealer, err := p.cryptoSetup.GetInitialSealer()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
hdr := p.getLongHeader(protocol.EncryptionInitial)
|
||||
return sealer, hdr, nil
|
||||
case protocol.Encryption0RTT:
|
||||
sealer, err := p.cryptoSetup.Get0RTTSealer()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
hdr := p.getLongHeader(protocol.Encryption0RTT)
|
||||
return sealer, hdr, nil
|
||||
case protocol.EncryptionHandshake:
|
||||
sealer, err := p.cryptoSetup.GetHandshakeSealer()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
hdr := p.getLongHeader(protocol.EncryptionHandshake)
|
||||
return sealer, hdr, nil
|
||||
case protocol.Encryption1RTT:
|
||||
sealer, err := p.cryptoSetup.Get1RTTSealer()
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
hdr := p.getShortHeader(sealer.KeyPhase())
|
||||
return sealer, hdr, nil
|
||||
default:
|
||||
return nil, nil, fmt.Errorf("unexpected encryption level: %s", encLevel)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *packetPacker) getShortHeader(kp protocol.KeyPhaseBit) *wire.ExtendedHeader {
|
||||
pn, pnLen := p.pnManager.PeekPacketNumber(protocol.Encryption1RTT)
|
||||
hdr := &wire.ExtendedHeader{}
|
||||
hdr.PacketNumber = pn
|
||||
hdr.PacketNumberLen = pnLen
|
||||
hdr.DestConnectionID = p.getDestConnID()
|
||||
hdr.KeyPhase = kp
|
||||
return hdr
|
||||
}
|
||||
|
||||
func (p *packetPacker) getLongHeader(encLevel protocol.EncryptionLevel) *wire.ExtendedHeader {
|
||||
pn, pnLen := p.pnManager.PeekPacketNumber(encLevel)
|
||||
hdr := &wire.ExtendedHeader{
|
||||
PacketNumber: pn,
|
||||
PacketNumberLen: pnLen,
|
||||
}
|
||||
hdr.IsLongHeader = true
|
||||
hdr.Version = p.version
|
||||
hdr.SrcConnectionID = p.srcConnID
|
||||
hdr.DestConnectionID = p.getDestConnID()
|
||||
|
||||
//nolint:exhaustive // 1-RTT packets are not long header packets.
|
||||
switch encLevel {
|
||||
case protocol.EncryptionInitial:
|
||||
hdr.Type = protocol.PacketTypeInitial
|
||||
hdr.Token = p.token
|
||||
case protocol.EncryptionHandshake:
|
||||
hdr.Type = protocol.PacketTypeHandshake
|
||||
case protocol.Encryption0RTT:
|
||||
hdr.Type = protocol.PacketType0RTT
|
||||
}
|
||||
return hdr
|
||||
}
|
||||
|
||||
// writeSinglePacket packs a single packet.
|
||||
func (p *packetPacker) writeSinglePacket(
|
||||
hdr *wire.ExtendedHeader,
|
||||
payload *payload,
|
||||
encLevel protocol.EncryptionLevel,
|
||||
sealer sealer,
|
||||
) (*packedPacket, error) {
|
||||
buffer := getPacketBuffer()
|
||||
var paddingLen protocol.ByteCount
|
||||
if encLevel == protocol.EncryptionInitial {
|
||||
paddingLen = p.initialPaddingLen(payload.frames, hdr.GetLength(p.version)+payload.length+protocol.ByteCount(sealer.Overhead()))
|
||||
}
|
||||
contents, err := p.appendPacket(buffer, hdr, payload, paddingLen, encLevel, sealer, false)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &packedPacket{
|
||||
buffer: buffer,
|
||||
packetContents: contents,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *packetPacker) appendPacket(buffer *packetBuffer, header *wire.ExtendedHeader, payload *payload, padding protocol.ByteCount, encLevel protocol.EncryptionLevel, sealer sealer, isMTUProbePacket bool) (*packetContents, error) {
|
||||
var paddingLen protocol.ByteCount
|
||||
pnLen := protocol.ByteCount(header.PacketNumberLen)
|
||||
if payload.length < 4-pnLen {
|
||||
paddingLen = 4 - pnLen - payload.length
|
||||
}
|
||||
paddingLen += padding
|
||||
if header.IsLongHeader {
|
||||
header.Length = pnLen + protocol.ByteCount(sealer.Overhead()) + payload.length + paddingLen
|
||||
}
|
||||
|
||||
hdrOffset := buffer.Len()
|
||||
buf := bytes.NewBuffer(buffer.Data)
|
||||
if err := header.Write(buf, p.version); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
payloadOffset := buf.Len()
|
||||
|
||||
if payload.ack != nil {
|
||||
if err := payload.ack.Write(buf, p.version); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if paddingLen > 0 {
|
||||
buf.Write(make([]byte, paddingLen))
|
||||
}
|
||||
for _, frame := range payload.frames {
|
||||
if err := frame.Write(buf, p.version); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
if payloadSize := protocol.ByteCount(buf.Len()-payloadOffset) - paddingLen; payloadSize != payload.length {
|
||||
return nil, fmt.Errorf("PacketPacker BUG: payload size inconsistent (expected %d, got %d bytes)", payload.length, payloadSize)
|
||||
}
|
||||
if !isMTUProbePacket {
|
||||
if size := protocol.ByteCount(buf.Len() + sealer.Overhead()); size > p.maxPacketSize {
|
||||
return nil, fmt.Errorf("PacketPacker BUG: packet too large (%d bytes, allowed %d bytes)", size, p.maxPacketSize)
|
||||
}
|
||||
}
|
||||
|
||||
raw := buffer.Data
|
||||
// encrypt the packet
|
||||
raw = raw[:buf.Len()]
|
||||
_ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], header.PacketNumber, raw[hdrOffset:payloadOffset])
|
||||
raw = raw[0 : buf.Len()+sealer.Overhead()]
|
||||
// apply header protection
|
||||
pnOffset := payloadOffset - int(header.PacketNumberLen)
|
||||
sealer.EncryptHeader(raw[pnOffset+4:pnOffset+4+16], &raw[hdrOffset], raw[pnOffset:payloadOffset])
|
||||
buffer.Data = raw
|
||||
|
||||
num := p.pnManager.PopPacketNumber(encLevel)
|
||||
if num != header.PacketNumber {
|
||||
return nil, errors.New("packetPacker BUG: Peeked and Popped packet numbers do not match")
|
||||
}
|
||||
return &packetContents{
|
||||
header: header,
|
||||
ack: payload.ack,
|
||||
frames: payload.frames,
|
||||
length: buffer.Len() - hdrOffset,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *packetPacker) SetToken(token []byte) {
|
||||
p.token = token
|
||||
}
|
||||
|
||||
// When a higher MTU is discovered, use it.
|
||||
func (p *packetPacker) SetMaxPacketSize(s protocol.ByteCount) {
|
||||
p.maxPacketSize = s
|
||||
}
|
||||
|
||||
// If the peer sets a max_udp_payload_size that's smaller than the size we're currently using,
|
||||
// we need to reduce the size of packets we send.
|
||||
func (p *packetPacker) HandleTransportParameters(params *wire.TransportParameters) {
|
||||
if params.MaxUDPPayloadSize != 0 {
|
||||
p.maxPacketSize = utils.MinByteCount(p.maxPacketSize, params.MaxUDPPayloadSize)
|
||||
}
|
||||
}
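// Illustrative note (not part of the original source): if the peer advertises
// max_udp_payload_size = 1350 while we are currently packing 1452-byte packets,
// maxPacketSize is reduced to 1350; a larger advertised value never increases it,
// since utils.MinByteCount keeps the smaller of the two.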
@ -1,670 +0,0 @@
package quic
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"crypto/tls"
|
||||
"errors"
|
||||
"fmt"
|
||||
"net"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
"time"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/handshake"
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
"github.com/lucas-clemente/quic-go/internal/qerr"
|
||||
"github.com/lucas-clemente/quic-go/internal/utils"
|
||||
"github.com/lucas-clemente/quic-go/internal/wire"
|
||||
"github.com/lucas-clemente/quic-go/logging"
|
||||
)
|
||||
|
||||
// ErrServerClosed is returned by the Listener or EarlyListener's Accept method after a call to Close.
|
||||
var ErrServerClosed = errors.New("quic: Server closed")
|
||||
|
||||
// packetHandler handles packets
|
||||
type packetHandler interface {
|
||||
handlePacket(*receivedPacket)
|
||||
shutdown()
|
||||
destroy(error)
|
||||
getPerspective() protocol.Perspective
|
||||
}
|
||||
|
||||
type unknownPacketHandler interface {
|
||||
handlePacket(*receivedPacket)
|
||||
setCloseError(error)
|
||||
}
|
||||
|
||||
type packetHandlerManager interface {
|
||||
AddWithConnID(protocol.ConnectionID, protocol.ConnectionID, func() packetHandler) bool
|
||||
Destroy() error
|
||||
connRunner
|
||||
SetServer(unknownPacketHandler)
|
||||
CloseServer()
|
||||
}
|
||||
|
||||
type quicConn interface {
|
||||
EarlyConnection
|
||||
earlyConnReady() <-chan struct{}
|
||||
handlePacket(*receivedPacket)
|
||||
GetVersion() protocol.VersionNumber
|
||||
getPerspective() protocol.Perspective
|
||||
run() error
|
||||
destroy(error)
|
||||
shutdown()
|
||||
}
|
||||
|
||||
// A Listener of QUIC
|
||||
type baseServer struct {
|
||||
mutex sync.Mutex
|
||||
|
||||
acceptEarlyConns bool
|
||||
|
||||
tlsConf *tls.Config
|
||||
config *Config
|
||||
|
||||
conn rawConn
|
||||
// If the server is started with ListenAddr, we create a packet conn.
|
||||
// If it is started with Listen, we take a packet conn as a parameter.
|
||||
createdPacketConn bool
|
||||
|
||||
tokenGenerator *handshake.TokenGenerator
|
||||
|
||||
connHandler packetHandlerManager
|
||||
|
||||
receivedPackets chan *receivedPacket
|
||||
|
||||
// stored as a member so that it can be overridden in tests
|
||||
newConn func(
|
||||
sendConn,
|
||||
connRunner,
|
||||
protocol.ConnectionID, /* original dest connection ID */
|
||||
*protocol.ConnectionID, /* retry src connection ID */
|
||||
protocol.ConnectionID, /* client dest connection ID */
|
||||
protocol.ConnectionID, /* destination connection ID */
|
||||
protocol.ConnectionID, /* source connection ID */
|
||||
protocol.StatelessResetToken,
|
||||
*Config,
|
||||
*tls.Config,
|
||||
*handshake.TokenGenerator,
|
||||
bool, /* enable 0-RTT */
|
||||
logging.ConnectionTracer,
|
||||
uint64,
|
||||
utils.Logger,
|
||||
protocol.VersionNumber,
|
||||
) quicConn
|
||||
|
||||
serverError error
|
||||
errorChan chan struct{}
|
||||
closed bool
|
||||
running chan struct{} // closed as soon as run() returns
|
||||
|
||||
connQueue chan quicConn
|
||||
connQueueLen int32 // to be used as an atomic
|
||||
|
||||
logger utils.Logger
|
||||
}
|
||||
|
||||
var (
|
||||
_ Listener = &baseServer{}
|
||||
_ unknownPacketHandler = &baseServer{}
|
||||
)
|
||||
|
||||
type earlyServer struct{ *baseServer }
|
||||
|
||||
var _ EarlyListener = &earlyServer{}
|
||||
|
||||
func (s *earlyServer) Accept(ctx context.Context) (EarlyConnection, error) {
|
||||
return s.baseServer.accept(ctx)
|
||||
}
|
||||
|
||||
// ListenAddr creates a QUIC server listening on a given address.
|
||||
// The tls.Config must not be nil and must contain a certificate configuration.
|
||||
// The quic.Config may be nil, in that case the default values will be used.
|
||||
func ListenAddr(addr string, tlsConf *tls.Config, config *Config) (Listener, error) {
|
||||
return listenAddr(addr, tlsConf, config, false)
|
||||
}
|
||||
|
||||
// ListenAddrEarly works like ListenAddr, but it returns connections before the handshake completes.
|
||||
func ListenAddrEarly(addr string, tlsConf *tls.Config, config *Config) (EarlyListener, error) {
|
||||
s, err := listenAddr(addr, tlsConf, config, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &earlyServer{s}, nil
|
||||
}
|
||||
|
||||
func listenAddr(addr string, tlsConf *tls.Config, config *Config, acceptEarly bool) (*baseServer, error) {
|
||||
udpAddr, err := net.ResolveUDPAddr("udp", addr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conn, err := net.ListenUDP("udp", udpAddr)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serv, err := listen(conn, tlsConf, config, acceptEarly)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
serv.createdPacketConn = true
|
||||
return serv, nil
|
||||
}
|
||||
|
||||
// Listen listens for QUIC connections on a given net.PacketConn. If the
|
||||
// PacketConn satisfies the OOBCapablePacketConn interface (as a net.UDPConn
|
||||
// does), ECN and packet info support will be enabled. In this case, ReadMsgUDP
|
||||
// and WriteMsgUDP will be used instead of ReadFrom and WriteTo to read/write
|
||||
// packets. A single net.PacketConn can only be used for a single call to Listen.
|
||||
// The PacketConn can be used for simultaneous calls to Dial. QUIC connection
|
||||
// IDs are used for demultiplexing the different connections. The tls.Config
|
||||
// must not be nil and must contain a certificate configuration. The
|
||||
// tls.Config.CipherSuites allows setting of TLS 1.3 cipher suites. Furthermore,
|
||||
// it must define an application protocol (using NextProtos). The quic.Config may
|
||||
// be nil, in that case the default values will be used.
|
||||
func Listen(conn net.PacketConn, tlsConf *tls.Config, config *Config) (Listener, error) {
|
||||
return listen(conn, tlsConf, config, false)
|
||||
}
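// Example usage (illustrative sketch only; the certificate variable and the ALPN value
// "my-proto" are assumptions, not taken from this repository):
//
//	udpConn, _ := net.ListenPacket("udp", ":4433")
//	tlsConf := &tls.Config{
//		Certificates: []tls.Certificate{cert}, // cert obtained elsewhere
//		NextProtos:   []string{"my-proto"},
//	}
//	ln, err := Listen(udpConn, tlsConf, nil) // nil *Config falls back to the defaults
//	if err != nil {
//		// handle the error
//	}
//	conn, _ := ln.Accept(context.Background())
//	_ = conn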
|
||||
|
||||
// ListenEarly works like Listen, but it returns connections before the handshake completes.
|
||||
func ListenEarly(conn net.PacketConn, tlsConf *tls.Config, config *Config) (EarlyListener, error) {
|
||||
s, err := listen(conn, tlsConf, config, true)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &earlyServer{s}, nil
|
||||
}
|
||||
|
||||
func listen(conn net.PacketConn, tlsConf *tls.Config, config *Config, acceptEarly bool) (*baseServer, error) {
|
||||
if tlsConf == nil {
|
||||
return nil, errors.New("quic: tls.Config not set")
|
||||
}
|
||||
if err := validateConfig(config); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
config = populateServerConfig(config)
|
||||
for _, v := range config.Versions {
|
||||
if !protocol.IsValidVersion(v) {
|
||||
return nil, fmt.Errorf("%s is not a valid QUIC version", v)
|
||||
}
|
||||
}
|
||||
|
||||
connHandler, err := getMultiplexer().AddConn(conn, config.ConnectionIDGenerator.ConnectionIDLen(), config.StatelessResetKey, config.Tracer)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
tokenGenerator, err := handshake.NewTokenGenerator(rand.Reader)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
c, err := wrapConn(conn)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
s := &baseServer{
|
||||
conn: c,
|
||||
tlsConf: tlsConf,
|
||||
config: config,
|
||||
tokenGenerator: tokenGenerator,
|
||||
connHandler: connHandler,
|
||||
connQueue: make(chan quicConn),
|
||||
errorChan: make(chan struct{}),
|
||||
running: make(chan struct{}),
|
||||
receivedPackets: make(chan *receivedPacket, protocol.MaxServerUnprocessedPackets),
|
||||
newConn: newConnection,
|
||||
logger: utils.DefaultLogger.WithPrefix("server"),
|
||||
acceptEarlyConns: acceptEarly,
|
||||
}
|
||||
go s.run()
|
||||
connHandler.SetServer(s)
|
||||
s.logger.Debugf("Listening for %s connections on %s", conn.LocalAddr().Network(), conn.LocalAddr().String())
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (s *baseServer) run() {
|
||||
defer close(s.running)
|
||||
for {
|
||||
select {
|
||||
case <-s.errorChan:
|
||||
return
|
||||
default:
|
||||
}
|
||||
select {
|
||||
case <-s.errorChan:
|
||||
return
|
||||
case p := <-s.receivedPackets:
|
||||
if bufferStillInUse := s.handlePacketImpl(p); !bufferStillInUse {
|
||||
p.buffer.Release()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var defaultAcceptToken = func(clientAddr net.Addr, token *Token) bool {
|
||||
if token == nil {
|
||||
return false
|
||||
}
|
||||
validity := protocol.TokenValidity
|
||||
if token.IsRetryToken {
|
||||
validity = protocol.RetryTokenValidity
|
||||
}
|
||||
if time.Now().After(token.SentTime.Add(validity)) {
|
||||
return false
|
||||
}
|
||||
var sourceAddr string
|
||||
if udpAddr, ok := clientAddr.(*net.UDPAddr); ok {
|
||||
sourceAddr = udpAddr.IP.String()
|
||||
} else {
|
||||
sourceAddr = clientAddr.String()
|
||||
}
|
||||
return sourceAddr == token.RemoteAddr
|
||||
}
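// Illustrative note (not part of the original source): under the default policy above a
// token is accepted only if it exists, its age is within protocol.RetryTokenValidity (for
// Retry tokens) or protocol.TokenValidity (for NEW_TOKEN tokens), and the Initial arrives
// from the same source IP the token was issued to; a client whose source IP changed
// through NAT rebinding therefore fails the check.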
|
||||
|
||||
// Accept returns connections that already completed the handshake.
|
||||
// It is only valid if acceptEarlyConns is false.
|
||||
func (s *baseServer) Accept(ctx context.Context) (Connection, error) {
|
||||
return s.accept(ctx)
|
||||
}
|
||||
|
||||
func (s *baseServer) accept(ctx context.Context) (quicConn, error) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case conn := <-s.connQueue:
|
||||
atomic.AddInt32(&s.connQueueLen, -1)
|
||||
return conn, nil
|
||||
case <-s.errorChan:
|
||||
return nil, s.serverError
|
||||
}
|
||||
}
|
||||
|
||||
// Close the server
|
||||
func (s *baseServer) Close() error {
|
||||
s.mutex.Lock()
|
||||
if s.closed {
|
||||
s.mutex.Unlock()
|
||||
return nil
|
||||
}
|
||||
if s.serverError == nil {
|
||||
s.serverError = ErrServerClosed
|
||||
}
|
||||
// If the server was started with ListenAddr, we created the packet conn.
|
||||
// We need to close it in order to make the goroutine reading from that conn return.
|
||||
createdPacketConn := s.createdPacketConn
|
||||
s.closed = true
|
||||
close(s.errorChan)
|
||||
s.mutex.Unlock()
|
||||
|
||||
<-s.running
|
||||
s.connHandler.CloseServer()
|
||||
if createdPacketConn {
|
||||
return s.connHandler.Destroy()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *baseServer) setCloseError(e error) {
|
||||
s.mutex.Lock()
|
||||
defer s.mutex.Unlock()
|
||||
if s.closed {
|
||||
return
|
||||
}
|
||||
s.closed = true
|
||||
s.serverError = e
|
||||
close(s.errorChan)
|
||||
}
|
||||
|
||||
// Addr returns the server's network address
|
||||
func (s *baseServer) Addr() net.Addr {
|
||||
return s.conn.LocalAddr()
|
||||
}
|
||||
|
||||
func (s *baseServer) handlePacket(p *receivedPacket) {
|
||||
select {
|
||||
case s.receivedPackets <- p:
|
||||
default:
|
||||
s.logger.Debugf("Dropping packet from %s (%d bytes). Server receive queue full.", p.remoteAddr, p.Size())
|
||||
if s.config.Tracer != nil {
|
||||
s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropDOSPrevention)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *baseServer) handlePacketImpl(p *receivedPacket) bool /* is the buffer still in use? */ {
|
||||
if wire.IsVersionNegotiationPacket(p.data) {
|
||||
s.logger.Debugf("Dropping Version Negotiation packet.")
|
||||
if s.config.Tracer != nil {
|
||||
s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeVersionNegotiation, p.Size(), logging.PacketDropUnexpectedPacket)
|
||||
}
|
||||
return false
|
||||
}
|
||||
// If we're creating a new connection, the packet will be passed to the connection.
|
||||
// The header will then be parsed again.
|
||||
hdr, _, _, err := wire.ParsePacket(p.data, s.config.ConnectionIDGenerator.ConnectionIDLen())
|
||||
if err != nil && err != wire.ErrUnsupportedVersion {
|
||||
if s.config.Tracer != nil {
|
||||
s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropHeaderParseError)
|
||||
}
|
||||
s.logger.Debugf("Error parsing packet: %s", err)
|
||||
return false
|
||||
}
|
||||
// Short header packets should never end up here in the first place
|
||||
if !hdr.IsLongHeader {
|
||||
panic(fmt.Sprintf("misrouted packet: %#v", hdr))
|
||||
}
|
||||
if hdr.Type == protocol.PacketTypeInitial && p.Size() < protocol.MinInitialPacketSize {
|
||||
s.logger.Debugf("Dropping a packet that is too small to be a valid Initial (%d bytes)", p.Size())
|
||||
if s.config.Tracer != nil {
|
||||
s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeInitial, p.Size(), logging.PacketDropUnexpectedPacket)
|
||||
}
|
||||
return false
|
||||
}
|
||||
// send a Version Negotiation Packet if the client is speaking a different protocol version
|
||||
if !protocol.IsSupportedVersion(s.config.Versions, hdr.Version) {
|
||||
if p.Size() < protocol.MinUnknownVersionPacketSize {
|
||||
s.logger.Debugf("Dropping a packet with an unknown version that is too small (%d bytes)", p.Size())
|
||||
if s.config.Tracer != nil {
|
||||
s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeNotDetermined, p.Size(), logging.PacketDropUnexpectedPacket)
|
||||
}
|
||||
return false
|
||||
}
|
||||
if !s.config.DisableVersionNegotiationPackets {
|
||||
go s.sendVersionNegotiationPacket(p, hdr)
|
||||
}
|
||||
return false
|
||||
}
|
||||
if hdr.IsLongHeader && hdr.Type != protocol.PacketTypeInitial {
|
||||
// Drop long header packets.
|
||||
// There's little point in sending a Stateless Reset, since the client
|
||||
// might not have received the token yet.
|
||||
s.logger.Debugf("Dropping long header packet of type %s (%d bytes)", hdr.Type, len(p.data))
|
||||
if s.config.Tracer != nil {
|
||||
s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeFromHeader(hdr), p.Size(), logging.PacketDropUnexpectedPacket)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
s.logger.Debugf("<- Received Initial packet.")
|
||||
|
||||
if err := s.handleInitialImpl(p, hdr); err != nil {
|
||||
s.logger.Errorf("Error occurred handling initial packet: %s", err)
|
||||
}
|
||||
// Don't put the packet buffer back.
|
||||
// handleInitialImpl deals with the buffer.
|
||||
return true
|
||||
}
|
||||
|
||||
func (s *baseServer) handleInitialImpl(p *receivedPacket, hdr *wire.Header) error {
|
||||
if len(hdr.Token) == 0 && hdr.DestConnectionID.Len() < protocol.MinConnectionIDLenInitial {
|
||||
p.buffer.Release()
|
||||
if s.config.Tracer != nil {
|
||||
s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeInitial, p.Size(), logging.PacketDropUnexpectedPacket)
|
||||
}
|
||||
return errors.New("too short connection ID")
|
||||
}
|
||||
|
||||
var (
|
||||
token *Token
|
||||
retrySrcConnID *protocol.ConnectionID
|
||||
)
|
||||
origDestConnID := hdr.DestConnectionID
|
||||
if len(hdr.Token) > 0 {
|
||||
c, err := s.tokenGenerator.DecodeToken(hdr.Token)
|
||||
if err == nil {
|
||||
token = &Token{
|
||||
IsRetryToken: c.IsRetryToken,
|
||||
RemoteAddr: c.RemoteAddr,
|
||||
SentTime: c.SentTime,
|
||||
}
|
||||
if token.IsRetryToken {
|
||||
origDestConnID = c.OriginalDestConnectionID
|
||||
retrySrcConnID = &c.RetrySrcConnectionID
|
||||
}
|
||||
}
|
||||
}
|
||||
if !s.config.AcceptToken(p.remoteAddr, token) {
|
||||
go func() {
|
||||
defer p.buffer.Release()
|
||||
if token != nil && token.IsRetryToken {
|
||||
if err := s.maybeSendInvalidToken(p, hdr); err != nil {
|
||||
s.logger.Debugf("Error sending INVALID_TOKEN error: %s", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
if err := s.sendRetry(p.remoteAddr, hdr, p.info); err != nil {
|
||||
s.logger.Debugf("Error sending Retry: %s", err)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
if queueLen := atomic.LoadInt32(&s.connQueueLen); queueLen >= protocol.MaxAcceptQueueSize {
|
||||
s.logger.Debugf("Rejecting new connection. Server currently busy. Accept queue length: %d (max %d)", queueLen, protocol.MaxAcceptQueueSize)
|
||||
go func() {
|
||||
defer p.buffer.Release()
|
||||
if err := s.sendConnectionRefused(p.remoteAddr, hdr, p.info); err != nil {
|
||||
s.logger.Debugf("Error rejecting connection: %s", err)
|
||||
}
|
||||
}()
|
||||
return nil
|
||||
}
|
||||
|
||||
connID, err := s.config.ConnectionIDGenerator.GenerateConnectionID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s.logger.Debugf("Changing connection ID to %s.", protocol.ConnectionID(connID))
|
||||
var conn quicConn
|
||||
tracingID := nextConnTracingID()
|
||||
if added := s.connHandler.AddWithConnID(hdr.DestConnectionID, connID, func() packetHandler {
|
||||
var tracer logging.ConnectionTracer
|
||||
if s.config.Tracer != nil {
|
||||
// Use the same connection ID that is passed to the client's GetLogWriter callback.
|
||||
connID := hdr.DestConnectionID
|
||||
if origDestConnID.Len() > 0 {
|
||||
connID = origDestConnID
|
||||
}
|
||||
tracer = s.config.Tracer.TracerForConnection(
|
||||
context.WithValue(context.Background(), ConnectionTracingKey, tracingID),
|
||||
protocol.PerspectiveServer,
|
||||
connID,
|
||||
)
|
||||
}
|
||||
conn = s.newConn(
|
||||
newSendConn(s.conn, p.remoteAddr, p.info),
|
||||
s.connHandler,
|
||||
origDestConnID,
|
||||
retrySrcConnID,
|
||||
hdr.DestConnectionID,
|
||||
hdr.SrcConnectionID,
|
||||
connID,
|
||||
s.connHandler.GetStatelessResetToken(connID),
|
||||
s.config,
|
||||
s.tlsConf,
|
||||
s.tokenGenerator,
|
||||
s.acceptEarlyConns,
|
||||
tracer,
|
||||
tracingID,
|
||||
s.logger,
|
||||
hdr.Version,
|
||||
)
|
||||
conn.handlePacket(p)
|
||||
return conn
|
||||
}); !added {
|
||||
return nil
|
||||
}
|
||||
go conn.run()
|
||||
go s.handleNewConn(conn)
|
||||
if conn == nil {
|
||||
p.buffer.Release()
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *baseServer) handleNewConn(conn quicConn) {
|
||||
connCtx := conn.Context()
|
||||
if s.acceptEarlyConns {
|
||||
// wait until the early connection is ready (or the handshake fails)
|
||||
select {
|
||||
case <-conn.earlyConnReady():
|
||||
case <-connCtx.Done():
|
||||
return
|
||||
}
|
||||
} else {
|
||||
// wait until the handshake is complete (or fails)
|
||||
select {
|
||||
case <-conn.HandshakeComplete().Done():
|
||||
case <-connCtx.Done():
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
atomic.AddInt32(&s.connQueueLen, 1)
|
||||
select {
|
||||
case s.connQueue <- conn:
|
||||
// blocks until the connection is accepted
|
||||
case <-connCtx.Done():
|
||||
atomic.AddInt32(&s.connQueueLen, -1)
|
||||
// don't pass connections that were already closed to Accept()
|
||||
}
|
||||
}
|
||||
|
||||
func (s *baseServer) sendRetry(remoteAddr net.Addr, hdr *wire.Header, info *packetInfo) error {
|
||||
// Log the Initial packet now.
|
||||
// If no Retry is sent, the packet will be logged by the connection.
|
||||
(&wire.ExtendedHeader{Header: *hdr}).Log(s.logger)
|
||||
srcConnID, err := s.config.ConnectionIDGenerator.GenerateConnectionID()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
token, err := s.tokenGenerator.NewRetryToken(remoteAddr, hdr.DestConnectionID, srcConnID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
replyHdr := &wire.ExtendedHeader{}
|
||||
replyHdr.IsLongHeader = true
|
||||
replyHdr.Type = protocol.PacketTypeRetry
|
||||
replyHdr.Version = hdr.Version
|
||||
replyHdr.SrcConnectionID = srcConnID
|
||||
replyHdr.DestConnectionID = hdr.SrcConnectionID
|
||||
replyHdr.Token = token
|
||||
if s.logger.Debug() {
|
||||
s.logger.Debugf("Changing connection ID to %s.", protocol.ConnectionID(srcConnID))
|
||||
s.logger.Debugf("-> Sending Retry")
|
||||
replyHdr.Log(s.logger)
|
||||
}
|
||||
|
||||
packetBuffer := getPacketBuffer()
|
||||
defer packetBuffer.Release()
|
||||
buf := bytes.NewBuffer(packetBuffer.Data)
|
||||
if err := replyHdr.Write(buf, hdr.Version); err != nil {
|
||||
return err
|
||||
}
|
||||
// append the Retry integrity tag
|
||||
tag := handshake.GetRetryIntegrityTag(buf.Bytes(), hdr.DestConnectionID, hdr.Version)
|
||||
buf.Write(tag[:])
|
||||
if s.config.Tracer != nil {
|
||||
s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(buf.Len()), nil)
|
||||
}
|
||||
_, err = s.conn.WritePacket(buf.Bytes(), remoteAddr, info.OOB())
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *baseServer) maybeSendInvalidToken(p *receivedPacket, hdr *wire.Header) error {
|
||||
// Only send INVALID_TOKEN if we can unprotect the packet.
|
||||
// This makes sure that we won't send it for packets that were corrupted.
|
||||
sealer, opener := handshake.NewInitialAEAD(hdr.DestConnectionID, protocol.PerspectiveServer, hdr.Version)
|
||||
data := p.data[:hdr.ParsedLen()+hdr.Length]
|
||||
extHdr, err := unpackHeader(opener, hdr, data, hdr.Version)
|
||||
if err != nil {
|
||||
if s.config.Tracer != nil {
|
||||
s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeInitial, p.Size(), logging.PacketDropHeaderParseError)
|
||||
}
|
||||
// don't return the error here. Just drop the packet.
|
||||
return nil
|
||||
}
|
||||
hdrLen := extHdr.ParsedLen()
|
||||
if _, err := opener.Open(data[hdrLen:hdrLen], data[hdrLen:], extHdr.PacketNumber, data[:hdrLen]); err != nil {
|
||||
// don't return the error here. Just drop the packet.
|
||||
if s.config.Tracer != nil {
|
||||
s.config.Tracer.DroppedPacket(p.remoteAddr, logging.PacketTypeInitial, p.Size(), logging.PacketDropPayloadDecryptError)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
if s.logger.Debug() {
|
||||
s.logger.Debugf("Client sent an invalid retry token. Sending INVALID_TOKEN to %s.", p.remoteAddr)
|
||||
}
|
||||
return s.sendError(p.remoteAddr, hdr, sealer, qerr.InvalidToken, p.info)
|
||||
}
|
||||
|
||||
func (s *baseServer) sendConnectionRefused(remoteAddr net.Addr, hdr *wire.Header, info *packetInfo) error {
|
||||
sealer, _ := handshake.NewInitialAEAD(hdr.DestConnectionID, protocol.PerspectiveServer, hdr.Version)
|
||||
return s.sendError(remoteAddr, hdr, sealer, qerr.ConnectionRefused, info)
|
||||
}
|
||||
|
||||
// sendError sends the error as a response to the packet received with header hdr
|
||||
func (s *baseServer) sendError(remoteAddr net.Addr, hdr *wire.Header, sealer handshake.LongHeaderSealer, errorCode qerr.TransportErrorCode, info *packetInfo) error {
|
||||
packetBuffer := getPacketBuffer()
|
||||
defer packetBuffer.Release()
|
||||
buf := bytes.NewBuffer(packetBuffer.Data)
|
||||
|
||||
ccf := &wire.ConnectionCloseFrame{ErrorCode: uint64(errorCode)}
|
||||
|
||||
replyHdr := &wire.ExtendedHeader{}
|
||||
replyHdr.IsLongHeader = true
|
||||
replyHdr.Type = protocol.PacketTypeInitial
|
||||
replyHdr.Version = hdr.Version
|
||||
replyHdr.SrcConnectionID = hdr.DestConnectionID
|
||||
replyHdr.DestConnectionID = hdr.SrcConnectionID
|
||||
replyHdr.PacketNumberLen = protocol.PacketNumberLen4
|
||||
replyHdr.Length = 4 /* packet number len */ + ccf.Length(hdr.Version) + protocol.ByteCount(sealer.Overhead())
|
||||
if err := replyHdr.Write(buf, hdr.Version); err != nil {
|
||||
return err
|
||||
}
|
||||
payloadOffset := buf.Len()
|
||||
|
||||
if err := ccf.Write(buf, hdr.Version); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
raw := buf.Bytes()
|
||||
_ = sealer.Seal(raw[payloadOffset:payloadOffset], raw[payloadOffset:], replyHdr.PacketNumber, raw[:payloadOffset])
|
||||
raw = raw[0 : buf.Len()+sealer.Overhead()]
|
||||
|
||||
pnOffset := payloadOffset - int(replyHdr.PacketNumberLen)
|
||||
sealer.EncryptHeader(
|
||||
raw[pnOffset+4:pnOffset+4+16],
|
||||
&raw[0],
|
||||
raw[pnOffset:payloadOffset],
|
||||
)
|
||||
|
||||
replyHdr.Log(s.logger)
|
||||
wire.LogFrame(s.logger, ccf, true)
|
||||
if s.config.Tracer != nil {
|
||||
s.config.Tracer.SentPacket(remoteAddr, &replyHdr.Header, protocol.ByteCount(len(raw)), []logging.Frame{ccf})
|
||||
}
|
||||
_, err := s.conn.WritePacket(raw, remoteAddr, info.OOB())
|
||||
return err
|
||||
}
|
||||
|
||||
func (s *baseServer) sendVersionNegotiationPacket(p *receivedPacket, hdr *wire.Header) {
|
||||
s.logger.Debugf("Client offered version %s, sending Version Negotiation", hdr.Version)
|
||||
data := wire.ComposeVersionNegotiation(hdr.SrcConnectionID, hdr.DestConnectionID, s.config.Versions)
|
||||
if s.config.Tracer != nil {
|
||||
s.config.Tracer.SentPacket(
|
||||
p.remoteAddr,
|
||||
&wire.Header{
|
||||
IsLongHeader: true,
|
||||
DestConnectionID: hdr.SrcConnectionID,
|
||||
SrcConnectionID: hdr.DestConnectionID,
|
||||
},
|
||||
protocol.ByteCount(len(data)),
|
||||
nil,
|
||||
)
|
||||
}
|
||||
if _, err := s.conn.WritePacket(data, p.remoteAddr, p.info.OOB()); err != nil {
|
||||
s.logger.Debugf("Error sending Version Negotiation: %s", err)
|
||||
}
|
||||
}
|
|
@ -1,18 +0,0 @@
package quic

import (
"github.com/cheekybits/genny/generic"

"github.com/lucas-clemente/quic-go/internal/protocol"
)

// In the auto-generated streams maps, we need to be able to close the streams.
// Therefore, extend the generic.Type with the stream close method.
// This definition must be in a file that Genny doesn't process.
type item interface {
generic.Type
updateSendWindow(protocol.ByteCount)
closeForShutdown(error)
}

const streamTypeGeneric protocol.StreamType = protocol.StreamTypeUni
|
|
@ -1,192 +0,0 @@
|
|||
// This file was automatically generated by genny.
|
||||
// Any changes will be lost if this file is regenerated.
|
||||
// see https://github.com/cheekybits/genny
|
||||
|
||||
package quic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
"github.com/lucas-clemente/quic-go/internal/wire"
|
||||
)
|
||||
|
||||
// When a stream is deleted before it was accepted, we can't delete it from the map immediately.
|
||||
// We need to wait until the application accepts it, and delete it then.
|
||||
type streamIEntry struct {
|
||||
stream streamI
|
||||
shouldDelete bool
|
||||
}
|
||||
|
||||
type incomingBidiStreamsMap struct {
|
||||
mutex sync.RWMutex
|
||||
newStreamChan chan struct{}
|
||||
|
||||
streams map[protocol.StreamNum]streamIEntry
|
||||
|
||||
nextStreamToAccept protocol.StreamNum // the next stream that will be returned by AcceptStream()
|
||||
nextStreamToOpen protocol.StreamNum // the highest stream that the peer opened
|
||||
maxStream protocol.StreamNum // the highest stream that the peer is allowed to open
|
||||
maxNumStreams uint64 // maximum number of streams
|
||||
|
||||
newStream func(protocol.StreamNum) streamI
|
||||
queueMaxStreamID func(*wire.MaxStreamsFrame)
|
||||
|
||||
closeErr error
|
||||
}
|
||||
|
||||
func newIncomingBidiStreamsMap(
|
||||
newStream func(protocol.StreamNum) streamI,
|
||||
maxStreams uint64,
|
||||
queueControlFrame func(wire.Frame),
|
||||
) *incomingBidiStreamsMap {
|
||||
return &incomingBidiStreamsMap{
|
||||
newStreamChan: make(chan struct{}, 1),
|
||||
streams: make(map[protocol.StreamNum]streamIEntry),
|
||||
maxStream: protocol.StreamNum(maxStreams),
|
||||
maxNumStreams: maxStreams,
|
||||
newStream: newStream,
|
||||
nextStreamToOpen: 1,
|
||||
nextStreamToAccept: 1,
|
||||
queueMaxStreamID: func(f *wire.MaxStreamsFrame) { queueControlFrame(f) },
|
||||
}
|
||||
}
|
||||
|
||||
func (m *incomingBidiStreamsMap) AcceptStream(ctx context.Context) (streamI, error) {
|
||||
// drain the newStreamChan, so we don't check the map twice if the stream doesn't exist
|
||||
select {
|
||||
case <-m.newStreamChan:
|
||||
default:
|
||||
}
|
||||
|
||||
m.mutex.Lock()
|
||||
|
||||
var num protocol.StreamNum
|
||||
var entry streamIEntry
|
||||
for {
|
||||
num = m.nextStreamToAccept
|
||||
if m.closeErr != nil {
|
||||
m.mutex.Unlock()
|
||||
return nil, m.closeErr
|
||||
}
|
||||
var ok bool
|
||||
entry, ok = m.streams[num]
|
||||
if ok {
|
||||
break
|
||||
}
|
||||
m.mutex.Unlock()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case <-m.newStreamChan:
|
||||
}
|
||||
m.mutex.Lock()
|
||||
}
|
||||
m.nextStreamToAccept++
|
||||
// If this stream was completed before being accepted, we can delete it now.
|
||||
if entry.shouldDelete {
|
||||
if err := m.deleteStream(num); err != nil {
|
||||
m.mutex.Unlock()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
m.mutex.Unlock()
|
||||
return entry.stream, nil
|
||||
}
|
||||
|
||||
func (m *incomingBidiStreamsMap) GetOrOpenStream(num protocol.StreamNum) (streamI, error) {
|
||||
m.mutex.RLock()
|
||||
if num > m.maxStream {
|
||||
m.mutex.RUnlock()
|
||||
return nil, streamError{
|
||||
message: "peer tried to open stream %d (current limit: %d)",
|
||||
nums: []protocol.StreamNum{num, m.maxStream},
|
||||
}
|
||||
}
|
||||
// if the num is smaller than the highest we accepted
|
||||
// * this stream exists in the map, and we can return it, or
|
||||
// * this stream was already closed, in which case we return nil
|
||||
if num < m.nextStreamToOpen {
|
||||
var s streamI
|
||||
// If the stream was already queued for deletion, and is just waiting to be accepted, don't return it.
|
||||
if entry, ok := m.streams[num]; ok && !entry.shouldDelete {
|
||||
s = entry.stream
|
||||
}
|
||||
m.mutex.RUnlock()
|
||||
return s, nil
|
||||
}
|
||||
m.mutex.RUnlock()
|
||||
|
||||
m.mutex.Lock()
|
||||
// no need to check the two error conditions from above again
|
||||
// * maxStream can only increase, so if the id was valid before, it definitely is valid now
|
||||
// * highestStream is only modified by this function
|
||||
for newNum := m.nextStreamToOpen; newNum <= num; newNum++ {
|
||||
m.streams[newNum] = streamIEntry{stream: m.newStream(newNum)}
|
||||
select {
|
||||
case m.newStreamChan <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
m.nextStreamToOpen = num + 1
|
||||
entry := m.streams[num]
|
||||
m.mutex.Unlock()
|
||||
return entry.stream, nil
|
||||
}
|
||||
|
||||
func (m *incomingBidiStreamsMap) DeleteStream(num protocol.StreamNum) error {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
return m.deleteStream(num)
|
||||
}
|
||||
|
||||
func (m *incomingBidiStreamsMap) deleteStream(num protocol.StreamNum) error {
|
||||
if _, ok := m.streams[num]; !ok {
|
||||
return streamError{
|
||||
message: "tried to delete unknown incoming stream %d",
|
||||
nums: []protocol.StreamNum{num},
|
||||
}
|
||||
}
|
||||
|
||||
// Don't delete this stream yet, if it was not yet accepted.
|
||||
// Just save it to streamsToDelete map, to make sure it is deleted as soon as it gets accepted.
|
||||
if num >= m.nextStreamToAccept {
|
||||
entry, ok := m.streams[num]
|
||||
if ok && entry.shouldDelete {
|
||||
return streamError{
|
||||
message: "tried to delete incoming stream %d multiple times",
|
||||
nums: []protocol.StreamNum{num},
|
||||
}
|
||||
}
|
||||
entry.shouldDelete = true
|
||||
m.streams[num] = entry // can't assign to struct in map, so we need to reassign
|
||||
return nil
|
||||
}
|
||||
|
||||
delete(m.streams, num)
|
||||
// queue a MAX_STREAM_ID frame, giving the peer the option to open a new stream
|
||||
if m.maxNumStreams > uint64(len(m.streams)) {
|
||||
maxStream := m.nextStreamToOpen + protocol.StreamNum(m.maxNumStreams-uint64(len(m.streams))) - 1
|
||||
// Never send a value larger than protocol.MaxStreamCount.
|
||||
if maxStream <= protocol.MaxStreamCount {
|
||||
m.maxStream = maxStream
|
||||
m.queueMaxStreamID(&wire.MaxStreamsFrame{
|
||||
Type: protocol.StreamTypeBidi,
|
||||
MaxStreamNum: m.maxStream,
|
||||
})
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
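To make the MAX_STREAMS arithmetic above concrete (numbers are illustrative only): with maxNumStreams = 100, nextStreamToOpen = 51 and 40 streams still tracked in the map after the deletion, the new limit is 51 + (100 - 40) - 1 = 110. The peer has opened streams 1 through 50, of which 40 are still alive, so it may open 60 more (streams 51 through 110), keeping the total of live plus newly openable streams at the configured 100.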
|
||||
|
||||
func (m *incomingBidiStreamsMap) CloseWithError(err error) {
|
||||
m.mutex.Lock()
|
||||
m.closeErr = err
|
||||
for _, entry := range m.streams {
|
||||
entry.stream.closeForShutdown(err)
|
||||
}
|
||||
m.mutex.Unlock()
|
||||
close(m.newStreamChan)
|
||||
}
|
|
@ -1,190 +0,0 @@
|
|||
package quic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
"github.com/lucas-clemente/quic-go/internal/wire"
|
||||
)
|
||||
|
||||
// When a stream is deleted before it was accepted, we can't delete it from the map immediately.
|
||||
// We need to wait until the application accepts it, and delete it then.
|
||||
type itemEntry struct {
|
||||
stream item
|
||||
shouldDelete bool
|
||||
}
|
||||
|
||||
//go:generate genny -in $GOFILE -out streams_map_incoming_bidi.go gen "item=streamI Item=BidiStream streamTypeGeneric=protocol.StreamTypeBidi"
|
||||
//go:generate genny -in $GOFILE -out streams_map_incoming_uni.go gen "item=receiveStreamI Item=UniStream streamTypeGeneric=protocol.StreamTypeUni"
|
||||
type incomingItemsMap struct {
|
||||
mutex sync.RWMutex
|
||||
newStreamChan chan struct{}
|
||||
|
||||
streams map[protocol.StreamNum]itemEntry
|
||||
|
||||
nextStreamToAccept protocol.StreamNum // the next stream that will be returned by AcceptStream()
|
||||
nextStreamToOpen protocol.StreamNum // the highest stream that the peer opened
|
||||
maxStream protocol.StreamNum // the highest stream that the peer is allowed to open
|
||||
maxNumStreams uint64 // maximum number of streams
|
||||
|
||||
newStream func(protocol.StreamNum) item
|
||||
queueMaxStreamID func(*wire.MaxStreamsFrame)
|
||||
|
||||
closeErr error
|
||||
}
|
||||
|
||||
func newIncomingItemsMap(
|
||||
newStream func(protocol.StreamNum) item,
|
||||
maxStreams uint64,
|
||||
queueControlFrame func(wire.Frame),
|
||||
) *incomingItemsMap {
|
||||
return &incomingItemsMap{
|
||||
newStreamChan: make(chan struct{}, 1),
|
||||
streams: make(map[protocol.StreamNum]itemEntry),
|
||||
maxStream: protocol.StreamNum(maxStreams),
|
||||
maxNumStreams: maxStreams,
|
||||
newStream: newStream,
|
||||
nextStreamToOpen: 1,
|
||||
nextStreamToAccept: 1,
|
||||
queueMaxStreamID: func(f *wire.MaxStreamsFrame) { queueControlFrame(f) },
|
||||
}
|
||||
}
|
||||
|
||||
func (m *incomingItemsMap) AcceptStream(ctx context.Context) (item, error) {
|
||||
// drain the newStreamChan, so we don't check the map twice if the stream doesn't exist
|
||||
select {
|
||||
case <-m.newStreamChan:
|
||||
default:
|
||||
}
|
||||
|
||||
m.mutex.Lock()
|
||||
|
||||
var num protocol.StreamNum
|
||||
var entry itemEntry
|
||||
for {
|
||||
num = m.nextStreamToAccept
|
||||
if m.closeErr != nil {
|
||||
m.mutex.Unlock()
|
||||
return nil, m.closeErr
|
||||
}
|
||||
var ok bool
|
||||
entry, ok = m.streams[num]
|
||||
if ok {
|
||||
break
|
||||
}
|
||||
m.mutex.Unlock()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil, ctx.Err()
|
||||
case <-m.newStreamChan:
|
||||
}
|
||||
m.mutex.Lock()
|
||||
}
|
||||
m.nextStreamToAccept++
|
||||
// If this stream was completed before being accepted, we can delete it now.
|
||||
if entry.shouldDelete {
|
||||
if err := m.deleteStream(num); err != nil {
|
||||
m.mutex.Unlock()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
m.mutex.Unlock()
|
||||
return entry.stream, nil
|
||||
}
|
||||
|
||||
func (m *incomingItemsMap) GetOrOpenStream(num protocol.StreamNum) (item, error) {
|
||||
m.mutex.RLock()
|
||||
if num > m.maxStream {
|
||||
m.mutex.RUnlock()
|
||||
return nil, streamError{
|
||||
message: "peer tried to open stream %d (current limit: %d)",
|
||||
nums: []protocol.StreamNum{num, m.maxStream},
|
||||
}
|
||||
}
|
||||
// if the num is smaller than the highest we accepted
|
||||
// * this stream exists in the map, and we can return it, or
|
||||
// * this stream was already closed, in which case we return nil
|
||||
if num < m.nextStreamToOpen {
|
||||
var s item
|
||||
// If the stream was already queued for deletion, and is just waiting to be accepted, don't return it.
|
||||
if entry, ok := m.streams[num]; ok && !entry.shouldDelete {
|
||||
s = entry.stream
|
||||
}
|
||||
m.mutex.RUnlock()
|
||||
return s, nil
|
||||
}
|
||||
m.mutex.RUnlock()
|
||||
|
||||
m.mutex.Lock()
|
||||
// no need to check the two error conditions from above again
|
||||
// * maxStream can only increase, so if the id was valid before, it definitely is valid now
|
||||
// * highestStream is only modified by this function
|
||||
for newNum := m.nextStreamToOpen; newNum <= num; newNum++ {
|
||||
m.streams[newNum] = itemEntry{stream: m.newStream(newNum)}
|
||||
select {
|
||||
case m.newStreamChan <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
m.nextStreamToOpen = num + 1
|
||||
entry := m.streams[num]
|
||||
m.mutex.Unlock()
|
||||
return entry.stream, nil
|
||||
}
|
||||
|
||||
func (m *incomingItemsMap) DeleteStream(num protocol.StreamNum) error {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
return m.deleteStream(num)
|
||||
}
|
||||
|
||||
func (m *incomingItemsMap) deleteStream(num protocol.StreamNum) error {
|
||||
if _, ok := m.streams[num]; !ok {
|
||||
return streamError{
|
||||
message: "tried to delete unknown incoming stream %d",
|
||||
nums: []protocol.StreamNum{num},
|
||||
}
|
||||
}
|
||||
|
||||
// Don't delete this stream yet, if it was not yet accepted.
|
||||
// Just save it to streamsToDelete map, to make sure it is deleted as soon as it gets accepted.
|
||||
if num >= m.nextStreamToAccept {
|
||||
entry, ok := m.streams[num]
|
||||
if ok && entry.shouldDelete {
|
||||
return streamError{
|
||||
message: "tried to delete incoming stream %d multiple times",
|
||||
nums: []protocol.StreamNum{num},
|
||||
}
|
||||
}
|
||||
entry.shouldDelete = true
|
||||
m.streams[num] = entry // can't assign to struct in map, so we need to reassign
|
||||
return nil
|
||||
}
|
||||
|
||||
delete(m.streams, num)
|
||||
// queue a MAX_STREAM_ID frame, giving the peer the option to open a new stream
|
||||
if m.maxNumStreams > uint64(len(m.streams)) {
|
||||
maxStream := m.nextStreamToOpen + protocol.StreamNum(m.maxNumStreams-uint64(len(m.streams))) - 1
|
||||
// Never send a value larger than protocol.MaxStreamCount.
|
||||
if maxStream <= protocol.MaxStreamCount {
|
||||
m.maxStream = maxStream
|
||||
m.queueMaxStreamID(&wire.MaxStreamsFrame{
|
||||
Type: streamTypeGeneric,
|
||||
MaxStreamNum: m.maxStream,
|
||||
})
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *incomingItemsMap) CloseWithError(err error) {
|
||||
m.mutex.Lock()
|
||||
m.closeErr = err
|
||||
for _, entry := range m.streams {
|
||||
entry.stream.closeForShutdown(err)
|
||||
}
|
||||
m.mutex.Unlock()
|
||||
close(m.newStreamChan)
|
||||
}
|
|
@ -1,226 +0,0 @@
|
|||
// This file was automatically generated by genny.
|
||||
// Any changes will be lost if this file is regenerated.
|
||||
// see https://github.com/cheekybits/genny
|
||||
|
||||
package quic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
"github.com/lucas-clemente/quic-go/internal/wire"
|
||||
)
|
||||
|
||||
type outgoingBidiStreamsMap struct {
|
||||
mutex sync.RWMutex
|
||||
|
||||
streams map[protocol.StreamNum]streamI
|
||||
|
||||
openQueue map[uint64]chan struct{}
|
||||
lowestInQueue uint64
|
||||
highestInQueue uint64
|
||||
|
||||
nextStream protocol.StreamNum // stream ID of the stream returned by OpenStream(Sync)
|
||||
maxStream protocol.StreamNum // the maximum stream ID we're allowed to open
|
||||
blockedSent bool // was a STREAMS_BLOCKED sent for the current maxStream
|
||||
|
||||
newStream func(protocol.StreamNum) streamI
|
||||
queueStreamIDBlocked func(*wire.StreamsBlockedFrame)
|
||||
|
||||
closeErr error
|
||||
}
|
||||
|
||||
func newOutgoingBidiStreamsMap(
|
||||
newStream func(protocol.StreamNum) streamI,
|
||||
queueControlFrame func(wire.Frame),
|
||||
) *outgoingBidiStreamsMap {
|
||||
return &outgoingBidiStreamsMap{
|
||||
streams: make(map[protocol.StreamNum]streamI),
|
||||
openQueue: make(map[uint64]chan struct{}),
|
||||
maxStream: protocol.InvalidStreamNum,
|
||||
nextStream: 1,
|
||||
newStream: newStream,
|
||||
queueStreamIDBlocked: func(f *wire.StreamsBlockedFrame) { queueControlFrame(f) },
|
||||
}
|
||||
}
|
||||
|
||||
func (m *outgoingBidiStreamsMap) OpenStream() (streamI, error) {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
if m.closeErr != nil {
|
||||
return nil, m.closeErr
|
||||
}
|
||||
|
||||
// if there are OpenStreamSync calls waiting, return an error here
|
||||
if len(m.openQueue) > 0 || m.nextStream > m.maxStream {
|
||||
m.maybeSendBlockedFrame()
|
||||
return nil, streamOpenErr{errTooManyOpenStreams}
|
||||
}
|
||||
return m.openStream(), nil
|
||||
}
|
||||
|
||||
func (m *outgoingBidiStreamsMap) OpenStreamSync(ctx context.Context) (streamI, error) {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
if m.closeErr != nil {
|
||||
return nil, m.closeErr
|
||||
}
|
||||
|
||||
if err := ctx.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(m.openQueue) == 0 && m.nextStream <= m.maxStream {
|
||||
return m.openStream(), nil
|
||||
}
|
||||
|
||||
waitChan := make(chan struct{}, 1)
|
||||
queuePos := m.highestInQueue
|
||||
m.highestInQueue++
|
||||
if len(m.openQueue) == 0 {
|
||||
m.lowestInQueue = queuePos
|
||||
}
|
||||
m.openQueue[queuePos] = waitChan
|
||||
m.maybeSendBlockedFrame()
|
||||
|
||||
for {
|
||||
m.mutex.Unlock()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
m.mutex.Lock()
|
||||
delete(m.openQueue, queuePos)
|
||||
return nil, ctx.Err()
|
||||
case <-waitChan:
|
||||
}
|
||||
m.mutex.Lock()
|
||||
|
||||
if m.closeErr != nil {
|
||||
return nil, m.closeErr
|
||||
}
|
||||
if m.nextStream > m.maxStream {
|
||||
// no stream available. Continue waiting
|
||||
continue
|
||||
}
|
||||
str := m.openStream()
|
||||
delete(m.openQueue, queuePos)
|
||||
m.lowestInQueue = queuePos + 1
|
||||
m.unblockOpenSync()
|
||||
return str, nil
|
||||
}
|
||||
}
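A caller-side sketch of how OpenStreamSync's blocking behaviour is typically bounded with a context, assuming the public Connection API of this quic-go version (illustrative, not part of this diff):

package main

import (
    "context"
    "time"

    "github.com/quic-go/quic-go"
)

// openWithDeadline blocks in OpenStreamSync while the peer's stream limit is
// exhausted; the context turns an indefinite wait into a timeout error.
func openWithDeadline(conn quic.Connection) error {
    ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
    defer cancel()

    str, err := conn.OpenStreamSync(ctx)
    if err != nil {
        return err // e.g. context.DeadlineExceeded while queued behind the limit
    }
    defer str.Close()

    _, err = str.Write([]byte("hello"))
    return err
}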
|
||||
|
||||
func (m *outgoingBidiStreamsMap) openStream() streamI {
|
||||
s := m.newStream(m.nextStream)
|
||||
m.streams[m.nextStream] = s
|
||||
m.nextStream++
|
||||
return s
|
||||
}
|
||||
|
||||
// maybeSendBlockedFrame queues a STREAMS_BLOCKED frame for the current stream offset,
|
||||
// if we haven't sent one for this offset yet
|
||||
func (m *outgoingBidiStreamsMap) maybeSendBlockedFrame() {
|
||||
if m.blockedSent {
|
||||
return
|
||||
}
|
||||
|
||||
var streamNum protocol.StreamNum
|
||||
if m.maxStream != protocol.InvalidStreamNum {
|
||||
streamNum = m.maxStream
|
||||
}
|
||||
m.queueStreamIDBlocked(&wire.StreamsBlockedFrame{
|
||||
Type: protocol.StreamTypeBidi,
|
||||
StreamLimit: streamNum,
|
||||
})
|
||||
m.blockedSent = true
|
||||
}
|
||||
|
||||
func (m *outgoingBidiStreamsMap) GetStream(num protocol.StreamNum) (streamI, error) {
|
||||
m.mutex.RLock()
|
||||
if num >= m.nextStream {
|
||||
m.mutex.RUnlock()
|
||||
return nil, streamError{
|
||||
message: "peer attempted to open stream %d",
|
||||
nums: []protocol.StreamNum{num},
|
||||
}
|
||||
}
|
||||
s := m.streams[num]
|
||||
m.mutex.RUnlock()
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (m *outgoingBidiStreamsMap) DeleteStream(num protocol.StreamNum) error {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
if _, ok := m.streams[num]; !ok {
|
||||
return streamError{
|
||||
message: "tried to delete unknown outgoing stream %d",
|
||||
nums: []protocol.StreamNum{num},
|
||||
}
|
||||
}
|
||||
delete(m.streams, num)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *outgoingBidiStreamsMap) SetMaxStream(num protocol.StreamNum) {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
if num <= m.maxStream {
|
||||
return
|
||||
}
|
||||
m.maxStream = num
|
||||
m.blockedSent = false
|
||||
if m.maxStream < m.nextStream-1+protocol.StreamNum(len(m.openQueue)) {
|
||||
m.maybeSendBlockedFrame()
|
||||
}
|
||||
m.unblockOpenSync()
|
||||
}
|
||||
|
||||
// UpdateSendWindow is called when the peer's transport parameters are received.
|
||||
// Only in the case of a 0-RTT handshake will we have open streams at this point.
|
||||
// We might need to update the send window, in case the server increased it.
|
||||
func (m *outgoingBidiStreamsMap) UpdateSendWindow(limit protocol.ByteCount) {
|
||||
m.mutex.Lock()
|
||||
for _, str := range m.streams {
|
||||
str.updateSendWindow(limit)
|
||||
}
|
||||
m.mutex.Unlock()
|
||||
}
|
||||
|
||||
// unblockOpenSync unblocks the next OpenStreamSync go-routine to open a new stream
|
||||
func (m *outgoingBidiStreamsMap) unblockOpenSync() {
|
||||
if len(m.openQueue) == 0 {
|
||||
return
|
||||
}
|
||||
for qp := m.lowestInQueue; qp <= m.highestInQueue; qp++ {
|
||||
c, ok := m.openQueue[qp]
|
||||
if !ok { // entry was deleted because the context was canceled
|
||||
continue
|
||||
}
|
||||
// unblockOpenSync is called both from OpenStreamSync and from SetMaxStream.
|
||||
// It's sufficient to only unblock OpenStreamSync once.
|
||||
select {
|
||||
case c <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (m *outgoingBidiStreamsMap) CloseWithError(err error) {
|
||||
m.mutex.Lock()
|
||||
m.closeErr = err
|
||||
for _, str := range m.streams {
|
||||
str.closeForShutdown(err)
|
||||
}
|
||||
for _, c := range m.openQueue {
|
||||
if c != nil {
|
||||
close(c)
|
||||
}
|
||||
}
|
||||
m.mutex.Unlock()
|
||||
}
|
|
@ -1,224 +0,0 @@
|
|||
package quic
|
||||
|
||||
import (
|
||||
"context"
|
||||
"sync"
|
||||
|
||||
"github.com/lucas-clemente/quic-go/internal/protocol"
|
||||
"github.com/lucas-clemente/quic-go/internal/wire"
|
||||
)
|
||||
|
||||
//go:generate genny -in $GOFILE -out streams_map_outgoing_bidi.go gen "item=streamI Item=BidiStream streamTypeGeneric=protocol.StreamTypeBidi"
|
||||
//go:generate genny -in $GOFILE -out streams_map_outgoing_uni.go gen "item=sendStreamI Item=UniStream streamTypeGeneric=protocol.StreamTypeUni"
|
||||
type outgoingItemsMap struct {
|
||||
mutex sync.RWMutex
|
||||
|
||||
streams map[protocol.StreamNum]item
|
||||
|
||||
openQueue map[uint64]chan struct{}
|
||||
lowestInQueue uint64
|
||||
highestInQueue uint64
|
||||
|
||||
nextStream protocol.StreamNum // stream ID of the stream returned by OpenStream(Sync)
|
||||
maxStream protocol.StreamNum // the maximum stream ID we're allowed to open
|
||||
blockedSent bool // was a STREAMS_BLOCKED sent for the current maxStream
|
||||
|
||||
newStream func(protocol.StreamNum) item
|
||||
queueStreamIDBlocked func(*wire.StreamsBlockedFrame)
|
||||
|
||||
closeErr error
|
||||
}
|
||||
|
||||
func newOutgoingItemsMap(
|
||||
newStream func(protocol.StreamNum) item,
|
||||
queueControlFrame func(wire.Frame),
|
||||
) *outgoingItemsMap {
|
||||
return &outgoingItemsMap{
|
||||
streams: make(map[protocol.StreamNum]item),
|
||||
openQueue: make(map[uint64]chan struct{}),
|
||||
maxStream: protocol.InvalidStreamNum,
|
||||
nextStream: 1,
|
||||
newStream: newStream,
|
||||
queueStreamIDBlocked: func(f *wire.StreamsBlockedFrame) { queueControlFrame(f) },
|
||||
}
|
||||
}
|
||||
|
||||
func (m *outgoingItemsMap) OpenStream() (item, error) {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
if m.closeErr != nil {
|
||||
return nil, m.closeErr
|
||||
}
|
||||
|
||||
// if there are OpenStreamSync calls waiting, return an error here
|
||||
if len(m.openQueue) > 0 || m.nextStream > m.maxStream {
|
||||
m.maybeSendBlockedFrame()
|
||||
return nil, streamOpenErr{errTooManyOpenStreams}
|
||||
}
|
||||
return m.openStream(), nil
|
||||
}
|
||||
|
||||
func (m *outgoingItemsMap) OpenStreamSync(ctx context.Context) (item, error) {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
if m.closeErr != nil {
|
||||
return nil, m.closeErr
|
||||
}
|
||||
|
||||
if err := ctx.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(m.openQueue) == 0 && m.nextStream <= m.maxStream {
|
||||
return m.openStream(), nil
|
||||
}
|
||||
|
||||
waitChan := make(chan struct{}, 1)
|
||||
queuePos := m.highestInQueue
|
||||
m.highestInQueue++
|
||||
if len(m.openQueue) == 0 {
|
||||
m.lowestInQueue = queuePos
|
||||
}
|
||||
m.openQueue[queuePos] = waitChan
|
||||
m.maybeSendBlockedFrame()
|
||||
|
||||
for {
|
||||
m.mutex.Unlock()
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
m.mutex.Lock()
|
||||
delete(m.openQueue, queuePos)
|
||||
return nil, ctx.Err()
|
||||
case <-waitChan:
|
||||
}
|
||||
m.mutex.Lock()
|
||||
|
||||
if m.closeErr != nil {
|
||||
return nil, m.closeErr
|
||||
}
|
||||
if m.nextStream > m.maxStream {
|
||||
// no stream available. Continue waiting
|
||||
continue
|
||||
}
|
||||
str := m.openStream()
|
||||
delete(m.openQueue, queuePos)
|
||||
m.lowestInQueue = queuePos + 1
|
||||
m.unblockOpenSync()
|
||||
return str, nil
|
||||
}
|
||||
}
|
||||
|
||||
func (m *outgoingItemsMap) openStream() item {
|
||||
s := m.newStream(m.nextStream)
|
||||
m.streams[m.nextStream] = s
|
||||
m.nextStream++
|
||||
return s
|
||||
}
|
||||
|
||||
// maybeSendBlockedFrame queues a STREAMS_BLOCKED frame for the current stream offset,
|
||||
// if we haven't sent one for this offset yet
|
||||
func (m *outgoingItemsMap) maybeSendBlockedFrame() {
|
||||
if m.blockedSent {
|
||||
return
|
||||
}
|
||||
|
||||
var streamNum protocol.StreamNum
|
||||
if m.maxStream != protocol.InvalidStreamNum {
|
||||
streamNum = m.maxStream
|
||||
}
|
||||
m.queueStreamIDBlocked(&wire.StreamsBlockedFrame{
|
||||
Type: streamTypeGeneric,
|
||||
StreamLimit: streamNum,
|
||||
})
|
||||
m.blockedSent = true
|
||||
}
|
||||
|
||||
func (m *outgoingItemsMap) GetStream(num protocol.StreamNum) (item, error) {
|
||||
m.mutex.RLock()
|
||||
if num >= m.nextStream {
|
||||
m.mutex.RUnlock()
|
||||
return nil, streamError{
|
||||
message: "peer attempted to open stream %d",
|
||||
nums: []protocol.StreamNum{num},
|
||||
}
|
||||
}
|
||||
s := m.streams[num]
|
||||
m.mutex.RUnlock()
|
||||
return s, nil
|
||||
}
|
||||
|
||||
func (m *outgoingItemsMap) DeleteStream(num protocol.StreamNum) error {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
if _, ok := m.streams[num]; !ok {
|
||||
return streamError{
|
||||
message: "tried to delete unknown outgoing stream %d",
|
||||
nums: []protocol.StreamNum{num},
|
||||
}
|
||||
}
|
||||
delete(m.streams, num)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (m *outgoingItemsMap) SetMaxStream(num protocol.StreamNum) {
|
||||
m.mutex.Lock()
|
||||
defer m.mutex.Unlock()
|
||||
|
||||
if num <= m.maxStream {
|
||||
return
|
||||
}
|
||||
m.maxStream = num
|
||||
m.blockedSent = false
|
||||
if m.maxStream < m.nextStream-1+protocol.StreamNum(len(m.openQueue)) {
|
||||
m.maybeSendBlockedFrame()
|
||||
}
|
||||
m.unblockOpenSync()
|
||||
}
|
||||
|
||||
// UpdateSendWindow is called when the peer's transport parameters are received.
|
||||
// Only in the case of a 0-RTT handshake will we have open streams at this point.
|
||||
// We might need to update the send window, in case the server increased it.
|
||||
func (m *outgoingItemsMap) UpdateSendWindow(limit protocol.ByteCount) {
|
||||
m.mutex.Lock()
|
||||
for _, str := range m.streams {
|
||||
str.updateSendWindow(limit)
|
||||
}
|
||||
m.mutex.Unlock()
|
||||
}
|
||||
|
||||
// unblockOpenSync unblocks the next OpenStreamSync go-routine to open a new stream
|
||||
func (m *outgoingItemsMap) unblockOpenSync() {
|
||||
if len(m.openQueue) == 0 {
|
||||
return
|
||||
}
|
||||
for qp := m.lowestInQueue; qp <= m.highestInQueue; qp++ {
|
||||
c, ok := m.openQueue[qp]
|
||||
if !ok { // entry was deleted because the context was canceled
|
||||
continue
|
||||
}
|
||||
// unblockOpenSync is called both from OpenStreamSync and from SetMaxStream.
|
||||
// It's sufficient to only unblock OpenStreamSync once.
|
||||
select {
|
||||
case c <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (m *outgoingItemsMap) CloseWithError(err error) {
|
||||
m.mutex.Lock()
|
||||
m.closeErr = err
|
||||
for _, str := range m.streams {
|
||||
str.closeForShutdown(err)
|
||||
}
|
||||
for _, c := range m.openQueue {
|
||||
if c != nil {
|
||||
close(c)
|
||||
}
|
||||
}
|
||||
m.mutex.Unlock()
|
||||
}
|
|
@ -1,9 +0,0 @@
//go:build tools
// +build tools

package quic

import (
_ "github.com/cheekybits/genny"
_ "github.com/onsi/ginkgo/ginkgo"
)
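The deleted file above is the standard Go "tools" pattern: a build-tagged file whose blank imports keep code-generation binaries (here genny and ginkgo) pinned in go.mod even though no package imports them at run time. A generic sketch of the same pattern (the tool name is a placeholder, not from this repository):

//go:build tools
// +build tools

package tools

import (
    _ "golang.org/x/tools/cmd/stringer" // pins the generator so `go generate` builds reproducibly
)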
|
|
@ -1,6 +0,0 @@
# qtls

[![Godoc Reference](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat-square)](https://godoc.org/github.com/marten-seemann/qtls)
[![CircleCI Build Status](https://img.shields.io/circleci/project/github/marten-seemann/qtls.svg?style=flat-square&label=CircleCI+build)](https://circleci.com/gh/marten-seemann/qtls)

This repository contains a modified version of the standard library's TLS implementation, modified for the QUIC protocol. It is used by [quic-go](https://github.com/lucas-clemente/quic-go).
|
|
@ -1,289 +0,0 @@
|
|||
// Copyright 2017 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package qtls
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
"crypto/ecdsa"
|
||||
"crypto/ed25519"
|
||||
"crypto/elliptic"
|
||||
"crypto/rsa"
|
||||
"errors"
|
||||
"fmt"
|
||||
"hash"
|
||||
"io"
|
||||
)
|
||||
|
||||
// verifyHandshakeSignature verifies a signature against pre-hashed
|
||||
// (if required) handshake contents.
|
||||
func verifyHandshakeSignature(sigType uint8, pubkey crypto.PublicKey, hashFunc crypto.Hash, signed, sig []byte) error {
|
||||
switch sigType {
|
||||
case signatureECDSA:
|
||||
pubKey, ok := pubkey.(*ecdsa.PublicKey)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected an ECDSA public key, got %T", pubkey)
|
||||
}
|
||||
if !ecdsa.VerifyASN1(pubKey, signed, sig) {
|
||||
return errors.New("ECDSA verification failure")
|
||||
}
|
||||
case signatureEd25519:
|
||||
pubKey, ok := pubkey.(ed25519.PublicKey)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected an Ed25519 public key, got %T", pubkey)
|
||||
}
|
||||
if !ed25519.Verify(pubKey, signed, sig) {
|
||||
return errors.New("Ed25519 verification failure")
|
||||
}
|
||||
case signaturePKCS1v15:
|
||||
pubKey, ok := pubkey.(*rsa.PublicKey)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected an RSA public key, got %T", pubkey)
|
||||
}
|
||||
if err := rsa.VerifyPKCS1v15(pubKey, hashFunc, signed, sig); err != nil {
|
||||
return err
|
||||
}
|
||||
case signatureRSAPSS:
|
||||
pubKey, ok := pubkey.(*rsa.PublicKey)
|
||||
if !ok {
|
||||
return fmt.Errorf("expected an RSA public key, got %T", pubkey)
|
||||
}
|
||||
signOpts := &rsa.PSSOptions{SaltLength: rsa.PSSSaltLengthEqualsHash}
|
||||
if err := rsa.VerifyPSS(pubKey, hashFunc, signed, sig, signOpts); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return errors.New("internal error: unknown signature type")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
const (
|
||||
serverSignatureContext = "TLS 1.3, server CertificateVerify\x00"
|
||||
clientSignatureContext = "TLS 1.3, client CertificateVerify\x00"
|
||||
)
|
||||
|
||||
var signaturePadding = []byte{
|
||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||
0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
|
||||
}
|
||||
|
||||
// signedMessage returns the pre-hashed (if necessary) message to be signed by
|
||||
// certificate keys in TLS 1.3. See RFC 8446, Section 4.4.3.
|
||||
func signedMessage(sigHash crypto.Hash, context string, transcript hash.Hash) []byte {
|
||||
if sigHash == directSigning {
|
||||
b := &bytes.Buffer{}
|
||||
b.Write(signaturePadding)
|
||||
io.WriteString(b, context)
|
||||
b.Write(transcript.Sum(nil))
|
||||
return b.Bytes()
|
||||
}
|
||||
h := sigHash.New()
|
||||
h.Write(signaturePadding)
|
||||
io.WriteString(h, context)
|
||||
h.Write(transcript.Sum(nil))
|
||||
return h.Sum(nil)
|
||||
}
|
||||
|
||||
// typeAndHashFromSignatureScheme returns the corresponding signature type and
|
||||
// crypto.Hash for a given TLS SignatureScheme.
|
||||
func typeAndHashFromSignatureScheme(signatureAlgorithm SignatureScheme) (sigType uint8, hash crypto.Hash, err error) {
|
||||
switch signatureAlgorithm {
|
||||
case PKCS1WithSHA1, PKCS1WithSHA256, PKCS1WithSHA384, PKCS1WithSHA512:
|
||||
sigType = signaturePKCS1v15
|
||||
case PSSWithSHA256, PSSWithSHA384, PSSWithSHA512:
|
||||
sigType = signatureRSAPSS
|
||||
case ECDSAWithSHA1, ECDSAWithP256AndSHA256, ECDSAWithP384AndSHA384, ECDSAWithP521AndSHA512:
|
||||
sigType = signatureECDSA
|
||||
case Ed25519:
|
||||
sigType = signatureEd25519
|
||||
default:
|
||||
return 0, 0, fmt.Errorf("unsupported signature algorithm: %v", signatureAlgorithm)
|
||||
}
|
||||
switch signatureAlgorithm {
|
||||
case PKCS1WithSHA1, ECDSAWithSHA1:
|
||||
hash = crypto.SHA1
|
||||
case PKCS1WithSHA256, PSSWithSHA256, ECDSAWithP256AndSHA256:
|
||||
hash = crypto.SHA256
|
||||
case PKCS1WithSHA384, PSSWithSHA384, ECDSAWithP384AndSHA384:
|
||||
hash = crypto.SHA384
|
||||
case PKCS1WithSHA512, PSSWithSHA512, ECDSAWithP521AndSHA512:
|
||||
hash = crypto.SHA512
|
||||
case Ed25519:
|
||||
hash = directSigning
|
||||
default:
|
||||
return 0, 0, fmt.Errorf("unsupported signature algorithm: %v", signatureAlgorithm)
|
||||
}
|
||||
return sigType, hash, nil
|
||||
}
|
||||
|
||||
// legacyTypeAndHashFromPublicKey returns the fixed signature type and crypto.Hash for
|
||||
// a given public key used with TLS 1.0 and 1.1, before the introduction of
|
||||
// signature algorithm negotiation.
|
||||
func legacyTypeAndHashFromPublicKey(pub crypto.PublicKey) (sigType uint8, hash crypto.Hash, err error) {
|
||||
switch pub.(type) {
|
||||
case *rsa.PublicKey:
|
||||
return signaturePKCS1v15, crypto.MD5SHA1, nil
|
||||
case *ecdsa.PublicKey:
|
||||
return signatureECDSA, crypto.SHA1, nil
|
||||
case ed25519.PublicKey:
|
||||
// RFC 8422 specifies support for Ed25519 in TLS 1.0 and 1.1,
|
||||
// but it requires holding on to a handshake transcript to do a
|
||||
// full signature, and not even OpenSSL bothers with the
|
||||
// complexity, so we can't even test it properly.
|
||||
return 0, 0, fmt.Errorf("tls: Ed25519 public keys are not supported before TLS 1.2")
|
||||
default:
|
||||
return 0, 0, fmt.Errorf("tls: unsupported public key: %T", pub)
|
||||
}
|
||||
}
|
||||
|
||||
var rsaSignatureSchemes = []struct {
|
||||
scheme SignatureScheme
|
||||
minModulusBytes int
|
||||
maxVersion uint16
|
||||
}{
|
||||
// RSA-PSS is used with PSSSaltLengthEqualsHash, and requires
|
||||
// emLen >= hLen + sLen + 2
|
||||
{PSSWithSHA256, crypto.SHA256.Size()*2 + 2, VersionTLS13},
|
||||
{PSSWithSHA384, crypto.SHA384.Size()*2 + 2, VersionTLS13},
|
||||
{PSSWithSHA512, crypto.SHA512.Size()*2 + 2, VersionTLS13},
|
||||
// PKCS #1 v1.5 uses prefixes from hashPrefixes in crypto/rsa, and requires
|
||||
// emLen >= len(prefix) + hLen + 11
|
||||
// TLS 1.3 dropped support for PKCS #1 v1.5 in favor of RSA-PSS.
|
||||
{PKCS1WithSHA256, 19 + crypto.SHA256.Size() + 11, VersionTLS12},
|
||||
{PKCS1WithSHA384, 19 + crypto.SHA384.Size() + 11, VersionTLS12},
|
||||
{PKCS1WithSHA512, 19 + crypto.SHA512.Size() + 11, VersionTLS12},
|
||||
{PKCS1WithSHA1, 15 + crypto.SHA1.Size() + 11, VersionTLS12},
|
||||
}
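Working out two rows of this table: crypto.SHA256.Size() is 32 bytes, so PSSWithSHA256 needs a modulus of at least 32*2 + 2 = 66 bytes (528 bits), while PKCS1WithSHA256 needs 19 + 32 + 11 = 62 bytes; keys smaller than that are filtered out of the advertised signature schemes below.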
|
||||
|
||||
// signatureSchemesForCertificate returns the list of supported SignatureSchemes
|
||||
// for a given certificate, based on the public key and the protocol version,
|
||||
// and optionally filtered by its explicit SupportedSignatureAlgorithms.
|
||||
//
|
||||
// This function must be kept in sync with supportedSignatureAlgorithms.
|
||||
func signatureSchemesForCertificate(version uint16, cert *Certificate) []SignatureScheme {
|
||||
priv, ok := cert.PrivateKey.(crypto.Signer)
|
||||
if !ok {
|
||||
return nil
|
||||
}
|
||||
|
||||
var sigAlgs []SignatureScheme
|
||||
switch pub := priv.Public().(type) {
|
||||
case *ecdsa.PublicKey:
|
||||
if version != VersionTLS13 {
|
||||
// In TLS 1.2 and earlier, ECDSA algorithms are not
|
||||
// constrained to a single curve.
|
||||
sigAlgs = []SignatureScheme{
|
||||
ECDSAWithP256AndSHA256,
|
||||
ECDSAWithP384AndSHA384,
|
||||
ECDSAWithP521AndSHA512,
|
||||
ECDSAWithSHA1,
|
||||
}
|
||||
break
|
||||
}
|
||||
switch pub.Curve {
|
||||
case elliptic.P256():
|
||||
sigAlgs = []SignatureScheme{ECDSAWithP256AndSHA256}
|
||||
case elliptic.P384():
|
||||
sigAlgs = []SignatureScheme{ECDSAWithP384AndSHA384}
|
||||
case elliptic.P521():
|
||||
sigAlgs = []SignatureScheme{ECDSAWithP521AndSHA512}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
case *rsa.PublicKey:
|
||||
size := pub.Size()
|
||||
sigAlgs = make([]SignatureScheme, 0, len(rsaSignatureSchemes))
|
||||
for _, candidate := range rsaSignatureSchemes {
|
||||
if size >= candidate.minModulusBytes && version <= candidate.maxVersion {
|
||||
sigAlgs = append(sigAlgs, candidate.scheme)
|
||||
}
|
||||
}
|
||||
case ed25519.PublicKey:
|
||||
sigAlgs = []SignatureScheme{Ed25519}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
|
||||
if cert.SupportedSignatureAlgorithms != nil {
|
||||
var filteredSigAlgs []SignatureScheme
|
||||
for _, sigAlg := range sigAlgs {
|
||||
if isSupportedSignatureAlgorithm(sigAlg, cert.SupportedSignatureAlgorithms) {
|
||||
filteredSigAlgs = append(filteredSigAlgs, sigAlg)
|
||||
}
|
||||
}
|
||||
return filteredSigAlgs
|
||||
}
|
||||
return sigAlgs
|
||||
}
|
||||
|
||||
// selectSignatureScheme picks a SignatureScheme from the peer's preference list
|
||||
// that works with the selected certificate. It's only called for protocol
|
||||
// versions that support signature algorithms, so TLS 1.2 and 1.3.
|
||||
func selectSignatureScheme(vers uint16, c *Certificate, peerAlgs []SignatureScheme) (SignatureScheme, error) {
|
||||
supportedAlgs := signatureSchemesForCertificate(vers, c)
|
||||
if len(supportedAlgs) == 0 {
|
||||
return 0, unsupportedCertificateError(c)
|
||||
}
|
||||
if len(peerAlgs) == 0 && vers == VersionTLS12 {
|
||||
// For TLS 1.2, if the client didn't send signature_algorithms then we
|
||||
// can assume that it supports SHA1. See RFC 5246, Section 7.4.1.4.1.
|
||||
peerAlgs = []SignatureScheme{PKCS1WithSHA1, ECDSAWithSHA1}
|
||||
}
|
||||
// Pick signature scheme in the peer's preference order, as our
|
||||
// preference order is not configurable.
|
||||
for _, preferredAlg := range peerAlgs {
|
||||
if isSupportedSignatureAlgorithm(preferredAlg, supportedAlgs) {
|
||||
return preferredAlg, nil
|
||||
}
|
||||
}
|
||||
return 0, errors.New("tls: peer doesn't support any of the certificate's signature algorithms")
|
||||
}
|
||||
|
||||
// unsupportedCertificateError returns a helpful error for certificates with
|
||||
// an unsupported private key.
|
||||
func unsupportedCertificateError(cert *Certificate) error {
|
||||
switch cert.PrivateKey.(type) {
|
||||
case rsa.PrivateKey, ecdsa.PrivateKey:
|
||||
return fmt.Errorf("tls: unsupported certificate: private key is %T, expected *%T",
|
||||
cert.PrivateKey, cert.PrivateKey)
|
||||
case *ed25519.PrivateKey:
|
||||
return fmt.Errorf("tls: unsupported certificate: private key is *ed25519.PrivateKey, expected ed25519.PrivateKey")
|
||||
}
|
||||
|
||||
signer, ok := cert.PrivateKey.(crypto.Signer)
|
||||
if !ok {
|
||||
return fmt.Errorf("tls: certificate private key (%T) does not implement crypto.Signer",
|
||||
cert.PrivateKey)
|
||||
}
|
||||
|
||||
switch pub := signer.Public().(type) {
|
||||
case *ecdsa.PublicKey:
|
||||
switch pub.Curve {
|
||||
case elliptic.P256():
|
||||
case elliptic.P384():
|
||||
case elliptic.P521():
|
||||
default:
|
||||
return fmt.Errorf("tls: unsupported certificate curve (%s)", pub.Curve.Params().Name)
|
||||
}
|
||||
case *rsa.PublicKey:
|
||||
return fmt.Errorf("tls: certificate RSA key size too small for supported signature algorithms")
|
||||
case ed25519.PublicKey:
|
||||
default:
|
||||
return fmt.Errorf("tls: unsupported certificate key (%T)", pub)
|
||||
}
|
||||
|
||||
if cert.SupportedSignatureAlgorithms != nil {
|
||||
return fmt.Errorf("tls: peer doesn't support the certificate custom signature algorithms")
|
||||
}
|
||||
|
||||
return fmt.Errorf("tls: internal error: unsupported key (%T)", cert.PrivateKey)
|
||||
}
|
|
@ -1,532 +0,0 @@
|
|||
// Copyright 2010 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
package qtls
|
||||
|
||||
import (
|
||||
"crypto"
|
||||
"crypto/aes"
|
||||
"crypto/cipher"
|
||||
"crypto/des"
|
||||
"crypto/hmac"
|
||||
"crypto/rc4"
|
||||
"crypto/sha1"
|
||||
"crypto/sha256"
|
||||
"crypto/x509"
|
||||
"fmt"
|
||||
"hash"
|
||||
|
||||
"golang.org/x/crypto/chacha20poly1305"
|
||||
)
|
||||
|
||||
// CipherSuite is a TLS cipher suite. Note that most functions in this package
|
||||
// accept and expose cipher suite IDs instead of this type.
|
||||
type CipherSuite struct {
|
||||
ID uint16
|
||||
Name string
|
||||
|
||||
// Supported versions is the list of TLS protocol versions that can
|
||||
// negotiate this cipher suite.
|
||||
SupportedVersions []uint16
|
||||
|
||||
// Insecure is true if the cipher suite has known security issues
|
||||
// due to its primitives, design, or implementation.
|
||||
Insecure bool
|
||||
}
|
var (
    supportedUpToTLS12 = []uint16{VersionTLS10, VersionTLS11, VersionTLS12}
    supportedOnlyTLS12 = []uint16{VersionTLS12}
    supportedOnlyTLS13 = []uint16{VersionTLS13}
)

// CipherSuites returns a list of cipher suites currently implemented by this
// package, excluding those with security issues, which are returned by
// InsecureCipherSuites.
//
// The list is sorted by ID. Note that the default cipher suites selected by
// this package might depend on logic that can't be captured by a static list.
func CipherSuites() []*CipherSuite {
    return []*CipherSuite{
        {TLS_RSA_WITH_3DES_EDE_CBC_SHA, "TLS_RSA_WITH_3DES_EDE_CBC_SHA", supportedUpToTLS12, false},
        {TLS_RSA_WITH_AES_128_CBC_SHA, "TLS_RSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, false},
        {TLS_RSA_WITH_AES_256_CBC_SHA, "TLS_RSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, false},
        {TLS_RSA_WITH_AES_128_GCM_SHA256, "TLS_RSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, false},
        {TLS_RSA_WITH_AES_256_GCM_SHA384, "TLS_RSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, false},

        {TLS_AES_128_GCM_SHA256, "TLS_AES_128_GCM_SHA256", supportedOnlyTLS13, false},
        {TLS_AES_256_GCM_SHA384, "TLS_AES_256_GCM_SHA384", supportedOnlyTLS13, false},
        {TLS_CHACHA20_POLY1305_SHA256, "TLS_CHACHA20_POLY1305_SHA256", supportedOnlyTLS13, false},

        {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, false},
        {TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, "TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, false},
        {TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, "TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA", supportedUpToTLS12, false},
        {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA", supportedUpToTLS12, false},
        {TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, "TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", supportedUpToTLS12, false},
        {TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, false},
        {TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, false},
        {TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, "TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", supportedOnlyTLS12, false},
        {TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, "TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", supportedOnlyTLS12, false},
        {TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256, "TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", supportedOnlyTLS12, false},
        {TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256, "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256", supportedOnlyTLS12, false},
    }
}

// InsecureCipherSuites returns a list of cipher suites currently implemented by
// this package and which have security issues.
//
// Most applications should not use the cipher suites in this list, and should
// only use those returned by CipherSuites.
func InsecureCipherSuites() []*CipherSuite {
    // RC4 suites are broken because RC4 is.
    // CBC-SHA256 suites have no Lucky13 countermeasures.
    return []*CipherSuite{
        {TLS_RSA_WITH_RC4_128_SHA, "TLS_RSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
        {TLS_RSA_WITH_AES_128_CBC_SHA256, "TLS_RSA_WITH_AES_128_CBC_SHA256", supportedOnlyTLS12, true},
        {TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, "TLS_ECDHE_ECDSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
        {TLS_ECDHE_RSA_WITH_RC4_128_SHA, "TLS_ECDHE_RSA_WITH_RC4_128_SHA", supportedUpToTLS12, true},
        {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, "TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256", supportedOnlyTLS12, true},
        {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256", supportedOnlyTLS12, true},
    }
}

// CipherSuiteName returns the standard name for the passed cipher suite ID
// (e.g. "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256"), or a fallback representation
// of the ID value if the cipher suite is not implemented by this package.
func CipherSuiteName(id uint16) string {
    for _, c := range CipherSuites() {
        if c.ID == id {
            return c.Name
        }
    }
    for _, c := range InsecureCipherSuites() {
        if c.ID == id {
            return c.Name
        }
    }
    return fmt.Sprintf("0x%04X", id)
}
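Reviewer note (illustrative, not part of the vendored file): a minimal sketch, assuming only the standard library crypto/tls, whose exported CipherSuites/InsecureCipherSuites/CipherSuiteName API this fork mirrors, of how a caller can build an allowlist of secure suite IDs and render names.

package main

import (
    "crypto/tls"
    "fmt"
)

func main() {
    // Collect the IDs of every suite the package considers secure,
    // skipping InsecureCipherSuites() entirely.
    var allowlist []uint16
    for _, cs := range tls.CipherSuites() {
        allowlist = append(allowlist, cs.ID)
    }
    fmt.Println(len(allowlist), "secure suites")

    // CipherSuiteName falls back to a hex representation for unknown IDs.
    fmt.Println(tls.CipherSuiteName(tls.TLS_AES_128_GCM_SHA256)) // "TLS_AES_128_GCM_SHA256"
    fmt.Println(tls.CipherSuiteName(0x1234))                     // "0x1234"
}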
// a keyAgreement implements the client and server side of a TLS key agreement
// protocol by generating and processing key exchange messages.
type keyAgreement interface {
    // On the server side, the first two methods are called in order.

    // In the case that the key agreement protocol doesn't use a
    // ServerKeyExchange message, generateServerKeyExchange can return nil,
    // nil.
    generateServerKeyExchange(*config, *Certificate, *clientHelloMsg, *serverHelloMsg) (*serverKeyExchangeMsg, error)
    processClientKeyExchange(*config, *Certificate, *clientKeyExchangeMsg, uint16) ([]byte, error)

    // On the client side, the next two methods are called in order.

    // This method may not be called if the server doesn't send a
    // ServerKeyExchange message.
    processServerKeyExchange(*config, *clientHelloMsg, *serverHelloMsg, *x509.Certificate, *serverKeyExchangeMsg) error
    generateClientKeyExchange(*config, *clientHelloMsg, *x509.Certificate) ([]byte, *clientKeyExchangeMsg, error)
}

const (
    // suiteECDHE indicates that the cipher suite involves elliptic curve
    // Diffie-Hellman. This means that it should only be selected when the
    // client indicates that it supports ECC with a curve and point format
    // that we're happy with.
    suiteECDHE = 1 << iota
    // suiteECSign indicates that the cipher suite involves an ECDSA or
    // EdDSA signature and therefore may only be selected when the server's
    // certificate is ECDSA or EdDSA. If this is not set then the cipher suite
    // is RSA based.
    suiteECSign
    // suiteTLS12 indicates that the cipher suite should only be advertised
    // and accepted when using TLS 1.2.
    suiteTLS12
    // suiteSHA384 indicates that the cipher suite uses SHA384 as the
    // handshake hash.
    suiteSHA384
    // suiteDefaultOff indicates that this cipher suite is not included by
    // default.
    suiteDefaultOff
)

// A cipherSuite is a specific combination of key agreement, cipher and MAC function.
type cipherSuite struct {
    id uint16
    // the lengths, in bytes, of the key material needed for each component.
    keyLen int
    macLen int
    ivLen  int
    ka     func(version uint16) keyAgreement
    // flags is a bitmask of the suite* values, above.
    flags  int
    cipher func(key, iv []byte, isRead bool) interface{}
    mac    func(key []byte) hash.Hash
    aead   func(key, fixedNonce []byte) aead
}

var cipherSuites = []*cipherSuite{
    // Ciphersuite order is chosen so that ECDHE comes before plain RSA and
    // AEADs are the top preference.
    {TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305, 32, 0, 12, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadChaCha20Poly1305},
    {TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305, 32, 0, 12, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12, nil, nil, aeadChaCha20Poly1305},
    {TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12, nil, nil, aeadAESGCM},
    {TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12, nil, nil, aeadAESGCM},
    {TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, ecdheRSAKA, suiteECDHE | suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM},
    {TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM},
    {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, ecdheRSAKA, suiteECDHE | suiteTLS12 | suiteDefaultOff, cipherAES, macSHA256, nil},
    {TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil},
    {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteTLS12 | suiteDefaultOff, cipherAES, macSHA256, nil},
    {TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, 16, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECSign, cipherAES, macSHA1, nil},
    {TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheRSAKA, suiteECDHE, cipherAES, macSHA1, nil},
    {TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, 32, 20, 16, ecdheECDSAKA, suiteECDHE | suiteECSign, cipherAES, macSHA1, nil},
    {TLS_RSA_WITH_AES_128_GCM_SHA256, 16, 0, 4, rsaKA, suiteTLS12, nil, nil, aeadAESGCM},
    {TLS_RSA_WITH_AES_256_GCM_SHA384, 32, 0, 4, rsaKA, suiteTLS12 | suiteSHA384, nil, nil, aeadAESGCM},
    {TLS_RSA_WITH_AES_128_CBC_SHA256, 16, 32, 16, rsaKA, suiteTLS12 | suiteDefaultOff, cipherAES, macSHA256, nil},
    {TLS_RSA_WITH_AES_128_CBC_SHA, 16, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil},
    {TLS_RSA_WITH_AES_256_CBC_SHA, 32, 20, 16, rsaKA, 0, cipherAES, macSHA1, nil},
    {TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, ecdheRSAKA, suiteECDHE, cipher3DES, macSHA1, nil},
    {TLS_RSA_WITH_3DES_EDE_CBC_SHA, 24, 20, 8, rsaKA, 0, cipher3DES, macSHA1, nil},

    // RC4-based cipher suites are disabled by default.
    {TLS_RSA_WITH_RC4_128_SHA, 16, 20, 0, rsaKA, suiteDefaultOff, cipherRC4, macSHA1, nil},
    {TLS_ECDHE_RSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheRSAKA, suiteECDHE | suiteDefaultOff, cipherRC4, macSHA1, nil},
    {TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, 16, 20, 0, ecdheECDSAKA, suiteECDHE | suiteECSign | suiteDefaultOff, cipherRC4, macSHA1, nil},
}

// selectCipherSuite returns the first cipher suite from ids which is also in
// supportedIDs and passes the ok filter.
func selectCipherSuite(ids, supportedIDs []uint16, ok func(*cipherSuite) bool) *cipherSuite {
    for _, id := range ids {
        candidate := cipherSuiteByID(id)
        if candidate == nil || !ok(candidate) {
            continue
        }

        for _, suppID := range supportedIDs {
            if id == suppID {
                return candidate
            }
        }
    }
    return nil
}
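Reviewer note (illustrative, not part of the vendored file): the negotiation pattern behind selectCipherSuite and mutualCipherSuite reduces to "first preferred ID that the peer also offers". A standalone sketch with hypothetical names (firstMutual is not part of this package):

package main

import "fmt"

// firstMutual returns the first id from prefs that also appears in offered.
func firstMutual(prefs, offered []uint16) (uint16, bool) {
    for _, id := range prefs {
        for _, o := range offered {
            if id == o {
                return id, true
            }
        }
    }
    return 0, false
}

func main() {
    prefs := []uint16{0x1303, 0x1301, 0x1302} // our preference order
    offered := []uint16{0x1301, 0x1302}       // IDs from the peer's hello
    if id, ok := firstMutual(prefs, offered); ok {
        fmt.Printf("negotiated 0x%04x\n", id) // 0x1301
    }
}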
// A cipherSuiteTLS13 defines only the pair of the AEAD algorithm and hash
// algorithm to be used with HKDF. See RFC 8446, Appendix B.4.
type cipherSuiteTLS13 struct {
    id     uint16
    keyLen int
    aead   func(key, fixedNonce []byte) aead
    hash   crypto.Hash
}

type CipherSuiteTLS13 struct {
    ID     uint16
    KeyLen int
    Hash   crypto.Hash
    AEAD   func(key, fixedNonce []byte) cipher.AEAD
}

func (c *CipherSuiteTLS13) IVLen() int {
    return aeadNonceLength
}

var cipherSuitesTLS13 = []*cipherSuiteTLS13{
    {TLS_AES_128_GCM_SHA256, 16, aeadAESGCMTLS13, crypto.SHA256},
    {TLS_CHACHA20_POLY1305_SHA256, 32, aeadChaCha20Poly1305, crypto.SHA256},
    {TLS_AES_256_GCM_SHA384, 32, aeadAESGCMTLS13, crypto.SHA384},
}

func cipherRC4(key, iv []byte, isRead bool) interface{} {
    cipher, _ := rc4.NewCipher(key)
    return cipher
}

func cipher3DES(key, iv []byte, isRead bool) interface{} {
    block, _ := des.NewTripleDESCipher(key)
    if isRead {
        return cipher.NewCBCDecrypter(block, iv)
    }
    return cipher.NewCBCEncrypter(block, iv)
}

func cipherAES(key, iv []byte, isRead bool) interface{} {
    block, _ := aes.NewCipher(key)
    if isRead {
        return cipher.NewCBCDecrypter(block, iv)
    }
    return cipher.NewCBCEncrypter(block, iv)
}

// macSHA1 returns a SHA-1 based constant time MAC.
func macSHA1(key []byte) hash.Hash {
    return hmac.New(newConstantTimeHash(sha1.New), key)
}

// macSHA256 returns a SHA-256 based MAC. This is only supported in TLS 1.2 and
// is currently only used in disabled-by-default cipher suites.
func macSHA256(key []byte) hash.Hash {
    return hmac.New(sha256.New, key)
}
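Reviewer note (illustrative, not part of the vendored file): cipherAES above returns either a CBC encrypter or decrypter over the same key and IV depending on the read/write direction. A standalone equivalent using only the standard library, with throwaway all-zero key material:

package main

import (
    "crypto/aes"
    "crypto/cipher"
    "fmt"
)

func main() {
    key := make([]byte, 16)         // AES-128 key (zeros, for illustration only)
    iv := make([]byte, aes.BlockSize)

    block, err := aes.NewCipher(key)
    if err != nil {
        panic(err)
    }

    plaintext := make([]byte, aes.BlockSize) // CBC needs block-aligned input
    ciphertext := make([]byte, len(plaintext))
    cipher.NewCBCEncrypter(block, iv).CryptBlocks(ciphertext, plaintext)

    recovered := make([]byte, len(ciphertext))
    cipher.NewCBCDecrypter(block, iv).CryptBlocks(recovered, ciphertext)
    fmt.Println(len(recovered) == len(plaintext)) // true
}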
type aead interface {
    cipher.AEAD

    // explicitNonceLen returns the number of bytes of explicit nonce
    // included in each record. This is eight for older AEADs and
    // zero for modern ones.
    explicitNonceLen() int
}

const (
    aeadNonceLength   = 12
    noncePrefixLength = 4
)

// prefixNonceAEAD wraps an AEAD and prefixes a fixed portion of the nonce to
// each call.
type prefixNonceAEAD struct {
    // nonce contains the fixed part of the nonce in the first four bytes.
    nonce [aeadNonceLength]byte
    aead  cipher.AEAD
}

func (f *prefixNonceAEAD) NonceSize() int        { return aeadNonceLength - noncePrefixLength }
func (f *prefixNonceAEAD) Overhead() int         { return f.aead.Overhead() }
func (f *prefixNonceAEAD) explicitNonceLen() int { return f.NonceSize() }

func (f *prefixNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte {
    copy(f.nonce[4:], nonce)
    return f.aead.Seal(out, f.nonce[:], plaintext, additionalData)
}

func (f *prefixNonceAEAD) Open(out, nonce, ciphertext, additionalData []byte) ([]byte, error) {
    copy(f.nonce[4:], nonce)
    return f.aead.Open(out, f.nonce[:], ciphertext, additionalData)
}
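Reviewer note (illustrative, not part of the vendored file): prefixNonceAEAD implements the TLS 1.2 AES-GCM nonce layout (RFC 5288): a 4-byte implicit part taken from the key block, followed by an 8-byte explicit per-record part. A standalone demonstration of the same layout with made-up key material:

package main

import (
    "crypto/aes"
    "crypto/cipher"
    "encoding/binary"
    "fmt"
)

func main() {
    key := make([]byte, 16)
    implicit := []byte{0xde, 0xad, 0xbe, 0xef} // 4-byte prefix from key material
    var seq uint64 = 1                         // explicit part, e.g. the record sequence number

    block, _ := aes.NewCipher(key)
    aead, _ := cipher.NewGCM(block) // standard 12-byte GCM nonce

    nonce := make([]byte, 12)
    copy(nonce[:4], implicit)
    binary.BigEndian.PutUint64(nonce[4:], seq)

    ct := aead.Seal(nil, nonce, []byte("record payload"), nil)
    fmt.Println(len(ct)) // payload length plus the 16-byte GCM tag
}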
// xorNonceAEAD wraps an AEAD by XORing in a fixed pattern to the nonce
// before each call.
type xorNonceAEAD struct {
    nonceMask [aeadNonceLength]byte
    aead      cipher.AEAD
}

func (f *xorNonceAEAD) NonceSize() int        { return 8 } // 64-bit sequence number
func (f *xorNonceAEAD) Overhead() int         { return f.aead.Overhead() }
func (f *xorNonceAEAD) explicitNonceLen() int { return 0 }

func (f *xorNonceAEAD) Seal(out, nonce, plaintext, additionalData []byte) []byte {
    for i, b := range nonce {
        f.nonceMask[4+i] ^= b
    }
    result := f.aead.Seal(out, f.nonceMask[:], plaintext, additionalData)
    for i, b := range nonce {
        f.nonceMask[4+i] ^= b
    }

    return result
}

func (f *xorNonceAEAD) Open(out, nonce, ciphertext, additionalData []byte) ([]byte, error) {
    for i, b := range nonce {
        f.nonceMask[4+i] ^= b
    }
    result, err := f.aead.Open(out, f.nonceMask[:], ciphertext, additionalData)
    for i, b := range nonce {
        f.nonceMask[4+i] ^= b
    }

    return result, err
}
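Reviewer note (illustrative, not part of the vendored file): xorNonceAEAD implements the TLS 1.3 scheme (also used for ChaCha20 suites in TLS 1.2) from RFC 8446, Section 5.3: the per-record nonce is the 12-byte static IV XORed with the left-padded 64-bit record sequence number. A standalone version of that derivation (recordNonce is a hypothetical helper, not part of this package):

package main

import (
    "encoding/binary"
    "fmt"
)

// recordNonce derives the per-record nonce from a static IV and sequence number.
func recordNonce(staticIV [12]byte, seq uint64) [12]byte {
    var explicit [8]byte
    binary.BigEndian.PutUint64(explicit[:], seq)
    nonce := staticIV
    for i, b := range explicit {
        nonce[4+i] ^= b // XOR into the last 8 bytes, as xorNonceAEAD does
    }
    return nonce
}

func main() {
    var iv [12]byte
    fmt.Printf("%x\n", recordNonce(iv, 0))
    fmt.Printf("%x\n", recordNonce(iv, 1)) // differs only in the last byte
}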
func aeadAESGCM(key, noncePrefix []byte) aead {
    if len(noncePrefix) != noncePrefixLength {
        panic("tls: internal error: wrong nonce length")
    }
    aes, err := aes.NewCipher(key)
    if err != nil {
        panic(err)
    }
    aead, err := cipher.NewGCM(aes)
    if err != nil {
        panic(err)
    }

    ret := &prefixNonceAEAD{aead: aead}
    copy(ret.nonce[:], noncePrefix)
    return ret
}

// AEADAESGCMTLS13 creates a new AES-GCM AEAD for TLS 1.3
func AEADAESGCMTLS13(key, fixedNonce []byte) cipher.AEAD {
    return aeadAESGCMTLS13(key, fixedNonce)
}

func aeadAESGCMTLS13(key, nonceMask []byte) aead {
    if len(nonceMask) != aeadNonceLength {
        panic("tls: internal error: wrong nonce length")
    }
    aes, err := aes.NewCipher(key)
    if err != nil {
        panic(err)
    }
    aead, err := cipher.NewGCM(aes)
    if err != nil {
        panic(err)
    }

    ret := &xorNonceAEAD{aead: aead}
    copy(ret.nonceMask[:], nonceMask)
    return ret
}

func aeadChaCha20Poly1305(key, nonceMask []byte) aead {
    if len(nonceMask) != aeadNonceLength {
        panic("tls: internal error: wrong nonce length")
    }
    aead, err := chacha20poly1305.New(key)
    if err != nil {
        panic(err)
    }

    ret := &xorNonceAEAD{aead: aead}
    copy(ret.nonceMask[:], nonceMask)
    return ret
}
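Reviewer note (illustrative, not part of the vendored file): aeadChaCha20Poly1305 builds on golang.org/x/crypto/chacha20poly1305, which takes a 32-byte key and a 12-byte nonce. Minimal usage outside the TLS record layer, with a throwaway all-zero key:

package main

import (
    "fmt"

    "golang.org/x/crypto/chacha20poly1305"
)

func main() {
    key := make([]byte, chacha20poly1305.KeySize) // 32 bytes
    aead, err := chacha20poly1305.New(key)
    if err != nil {
        panic(err)
    }

    nonce := make([]byte, aead.NonceSize()) // 12 bytes
    ct := aead.Seal(nil, nonce, []byte("hello"), nil)
    pt, err := aead.Open(nil, nonce, ct, nil)
    if err != nil {
        panic(err)
    }
    fmt.Println(string(pt)) // "hello"
}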
type constantTimeHash interface {
    hash.Hash
    ConstantTimeSum(b []byte) []byte
}

// cthWrapper wraps any hash.Hash that implements ConstantTimeSum, and replaces
// with that all calls to Sum. It's used to obtain a ConstantTimeSum-based HMAC.
type cthWrapper struct {
    h constantTimeHash
}

func (c *cthWrapper) Size() int                   { return c.h.Size() }
func (c *cthWrapper) BlockSize() int              { return c.h.BlockSize() }
func (c *cthWrapper) Reset()                      { c.h.Reset() }
func (c *cthWrapper) Write(p []byte) (int, error) { return c.h.Write(p) }
func (c *cthWrapper) Sum(b []byte) []byte         { return c.h.ConstantTimeSum(b) }

func newConstantTimeHash(h func() hash.Hash) func() hash.Hash {
    return func() hash.Hash {
        return &cthWrapper{h().(constantTimeHash)}
    }
}

// tls10MAC implements the TLS 1.0 MAC function. RFC 2246, Section 6.2.3.
func tls10MAC(h hash.Hash, out, seq, header, data, extra []byte) []byte {
    h.Reset()
    h.Write(seq)
    h.Write(header)
    h.Write(data)
    res := h.Sum(out)
    if extra != nil {
        h.Write(extra)
    }
    return res
}
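Reviewer note (illustrative, not part of the vendored file): tls10MAC is an HMAC over the record sequence number, header, and payload. Leaving aside the constant-time wrapper, the same computation looks roughly like this with the standard library (the header bytes below are simplified for illustration):

package main

import (
    "crypto/hmac"
    "crypto/sha1"
    "fmt"
)

func main() {
    macKey := make([]byte, 20)
    seq := make([]byte, 8)       // 64-bit record sequence number
    header := []byte{0x17, 3, 1} // record type + version (length omitted here)
    payload := []byte("record data")

    h := hmac.New(sha1.New, macKey)
    h.Write(seq)
    h.Write(header)
    h.Write(payload)
    fmt.Printf("%d-byte MAC\n", len(h.Sum(nil))) // 20-byte MAC for SHA-1
}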
func rsaKA(version uint16) keyAgreement {
    return rsaKeyAgreement{}
}

func ecdheECDSAKA(version uint16) keyAgreement {
    return &ecdheKeyAgreement{
        isRSA:   false,
        version: version,
    }
}

func ecdheRSAKA(version uint16) keyAgreement {
    return &ecdheKeyAgreement{
        isRSA:   true,
        version: version,
    }
}

// mutualCipherSuite returns a cipherSuite given a list of supported
// ciphersuites and the id requested by the peer.
func mutualCipherSuite(have []uint16, want uint16) *cipherSuite {
    for _, id := range have {
        if id == want {
            return cipherSuiteByID(id)
        }
    }
    return nil
}

func cipherSuiteByID(id uint16) *cipherSuite {
    for _, cipherSuite := range cipherSuites {
        if cipherSuite.id == id {
            return cipherSuite
        }
    }
    return nil
}

func mutualCipherSuiteTLS13(have []uint16, want uint16) *cipherSuiteTLS13 {
    for _, id := range have {
        if id == want {
            return cipherSuiteTLS13ByID(id)
        }
    }
    return nil
}

func cipherSuiteTLS13ByID(id uint16) *cipherSuiteTLS13 {
    for _, cipherSuite := range cipherSuitesTLS13 {
        if cipherSuite.id == id {
            return cipherSuite
        }
    }
    return nil
}

// A list of cipher suite IDs that are, or have been, implemented by this
// package.
//
// See https://www.iana.org/assignments/tls-parameters/tls-parameters.xml
const (
    // TLS 1.0 - 1.2 cipher suites.
    TLS_RSA_WITH_RC4_128_SHA                      uint16 = 0x0005
    TLS_RSA_WITH_3DES_EDE_CBC_SHA                 uint16 = 0x000a
    TLS_RSA_WITH_AES_128_CBC_SHA                  uint16 = 0x002f
    TLS_RSA_WITH_AES_256_CBC_SHA                  uint16 = 0x0035
    TLS_RSA_WITH_AES_128_CBC_SHA256               uint16 = 0x003c
    TLS_RSA_WITH_AES_128_GCM_SHA256               uint16 = 0x009c
    TLS_RSA_WITH_AES_256_GCM_SHA384               uint16 = 0x009d
    TLS_ECDHE_ECDSA_WITH_RC4_128_SHA              uint16 = 0xc007
    TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA          uint16 = 0xc009
    TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA          uint16 = 0xc00a
    TLS_ECDHE_RSA_WITH_RC4_128_SHA                uint16 = 0xc011
    TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA           uint16 = 0xc012
    TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA            uint16 = 0xc013
    TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA            uint16 = 0xc014
    TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256       uint16 = 0xc023
    TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256         uint16 = 0xc027
    TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256         uint16 = 0xc02f
    TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256       uint16 = 0xc02b
    TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384         uint16 = 0xc030
    TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384       uint16 = 0xc02c
    TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256   uint16 = 0xcca8
    TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xcca9

    // TLS 1.3 cipher suites.
    TLS_AES_128_GCM_SHA256       uint16 = 0x1301
    TLS_AES_256_GCM_SHA384       uint16 = 0x1302
    TLS_CHACHA20_POLY1305_SHA256 uint16 = 0x1303

    // TLS_FALLBACK_SCSV isn't a standard cipher suite but an indicator
    // that the client is doing version fallback. See RFC 7507.
    TLS_FALLBACK_SCSV uint16 = 0x5600

    // Legacy names for the corresponding cipher suites with the correct _SHA256
    // suffix, retained for backward compatibility.
    TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305   = TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256
    TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305 = TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256
)
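Reviewer note (illustrative, not part of the vendored file): these IDs match the constants exported by the standard library crypto/tls, so a caller can pin TLS 1.2 suites by ID. A minimal sketch of a restricted config; note that in crypto/tls the TLS 1.3 suites are not configurable via CipherSuites:

package main

import "crypto/tls"

// restrictedConfig returns a config limited to a few modern TLS 1.2 suites.
func restrictedConfig() *tls.Config {
    return &tls.Config{
        MinVersion: tls.VersionTLS12,
        CipherSuites: []uint16{
            tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,
            tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,
            tls.TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256,
        },
    }
}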
File diff suppressed because it is too large
@@ -1,12 +0,0 @@
// +build js

package qtls

var (
    hasGCMAsmAMD64 = false
    hasGCMAsmARM64 = false
    // Keep in sync with crypto/aes/cipher_s390x.go.
    hasGCMAsmS390X = false

    hasAESGCMHardwareSupport = false
)
@@ -1,20 +0,0 @@
// +build !js

package qtls

import (
    "runtime"

    "golang.org/x/sys/cpu"
)

var (
    hasGCMAsmAMD64 = cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ
    hasGCMAsmARM64 = cpu.ARM64.HasAES && cpu.ARM64.HasPMULL
    // Keep in sync with crypto/aes/cipher_s390x.go.
    hasGCMAsmS390X = cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR && (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM)

    hasAESGCMHardwareSupport = runtime.GOARCH == "amd64" && hasGCMAsmAMD64 ||
        runtime.GOARCH == "arm64" && hasGCMAsmARM64 ||
        runtime.GOARCH == "s390x" && hasGCMAsmS390X
)
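Reviewer note (illustrative, not part of the vendored file): the removed variables above gate whether AES-GCM has hardware support, which crypto/tls uses to decide whether to prefer AES-GCM or ChaCha20-Poly1305. A standalone check built from the same golang.org/x/sys/cpu flags, covering only amd64 and arm64 for brevity:

package main

import (
    "fmt"
    "runtime"

    "golang.org/x/sys/cpu"
)

func main() {
    hasAESGCM := (runtime.GOARCH == "amd64" && cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ) ||
        (runtime.GOARCH == "arm64" && cpu.ARM64.HasAES && cpu.ARM64.HasPMULL)

    if hasAESGCM {
        fmt.Println("prefer AES-GCM suites")
    } else {
        fmt.Println("prefer ChaCha20-Poly1305 suites")
    }
}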
File diff suppressed because it is too large
Some files were not shown because too many files have changed in this diff